LAGraphX_bc_batch3.c
//------------------------------------------------------------------------------
// LAGraphX_bc_batch: Brandes' algorithm for computing betweenness centrality
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bc_batch: Batch algorithm for computing betweenness centrality.
// Contributed by Scott Kolodziej and Tim Davis, Texas A&M University.
// Adapted from GraphBLAS C API Spec, Appendix B.4.
// LAGraph_bc_batch computes an approximation of the betweenness centrality of
// all nodes in a graph using a batched version of Brandes' algorithm.
//                                 ____
//                                 \       sigma(s,t | i)
//    Betweenness centrality   =    \      ----------------
//    of node i                     /         sigma(s,t)
//                                 /___
//                               s ≠ i ≠ t
//
// Where sigma(s,t) is the total number of shortest paths from node s to
// node t, and sigma(s,t | i) is the total number of shortest paths from
// node s to node t that pass through node i.
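// A tiny worked example (illustration only): in the undirected path graph
// 0 - 1 - 2, the only shortest path between nodes 0 and 2 passes through
// node 1, so the pair (s,t) = (0,2) contributes sigma(0,2 | 1) / sigma(0,2)
// = 1/1 = 1 to the centrality of node 1, while nodes 0 and 2 lie on no such
// interior path and accumulate 0.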
//
// Note that the true betweenness centrality requires computing shortest paths
// from all nodes s to all nodes t (or all-pairs shortest paths), which can be
// expensive to compute. By using a reasonably sized subset of source nodes, an
// approximation can be made.
//
// LAGraph_bc_batch performs simultaneous breadth-first searches of the entire
// graph starting at a given set of source nodes. This pass discovers all
// shortest paths from the source nodes to all other nodes in the graph. After
// the BFS is complete, the number of shortest paths that pass through a given
// node is tallied by reversing the traversal. From this, the (approximate)
// betweenness centrality is computed.
// A_matrix represents the graph. It must be square, and can be unsymmetric.
// Self-edges are OK.
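//
// A minimal calling sketch (hedged; graph setup elided, and both A and AT
// must already be stored by row, as checked below):
//
//      GrB_Vector centrality = NULL ;
//      GrB_Index sources [4] = { 0, 1, 2, 3 } ;
//      double timing [3] ;
//      LAGraphX_bc_batch3 (&centrality, A, AT, sources, 4, timing) ;
//      // ... use centrality, then GrB_free (&centrality) ;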
#define DO_PULL 0
// If DO_PULL is nonzero, the algorithm does each GrB_mxm twice: once with the
// "pull" (dot product method internally in GraphBLAS) and once with the "push"
// (the saxpy method in GraphBLAS). Then it pretends to have a perfect
// heuristic by taking the min of both times to compute the "pushpull" time.
// This is of course unrealistic, but it's a lower bound on any heuristic that
// tries to select the correct method at each step.
//------------------------------------------------------------------------------
#include "LAGraph_internal.h"
#define LAGRAPH_FREE_WORK \
{ \
GrB_free(&frontier); \
GrB_free(&paths); \
LAGraph_free(paths_dense); \
LAGraph_free(bc_update_dense); \
GrB_free(&t1); \
GrB_free(&t2); \
GrB_free (&pull_descriptor) ; \
if (S_array != NULL) \
{ \
for (int64_t i = 0; i < n ; i++) \
{ \
if (S_array [i] == NULL) break ; \
GrB_free (&(S_array [i])) ; \
} \
free (S_array) ; \
} \
}
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE_WORK; \
GrB_free (centrality); \
}
// TODO add LAGraph_PLUS_SECOND_FP* to LAGraph.h.
#if 0
// select FP64
#define REAL_t double
#define LAGr_REAL_TYPE GrB_FP64
#define LAGr_PLUS_SECOND_REAL GxB_PLUS_SECOND_FP64
#else
// select FP32
#define REAL_t float
#define LAGr_REAL_TYPE GrB_FP32
#define LAGr_PLUS_SECOND_REAL GxB_PLUS_SECOND_FP32
#endif
GrB_Info LAGraphX_bc_batch3 // betweenness centrality, batch algorithm
(
GrB_Vector *centrality, // centrality(i): betweenness centrality of node i
const GrB_Matrix A_matrix, // input graph
const GrB_Matrix AT_matrix, // A'
const GrB_Index *sources, // source vertices for shortest paths
int32_t num_sources, // number of source vertices (length of s)
double timing [3]
)
{
GrB_Info info ;
GrB_Descriptor pull_descriptor = NULL ;
// Frontier matrix
// Stores # of shortest paths to vertices at current BFS depth
GrB_Matrix frontier = NULL;
// Array of BFS search matrices
// S_array[i] is a matrix that stores the depth at which each vertex is
// first seen thus far in each BFS at the current depth i. Each column
// corresponds to a BFS traversal starting from a source node.
GrB_Matrix *S_array = NULL;
// Paths matrix holds the number of shortest paths for each node and
// starting node discovered so far. Starts out sparse and becomes denser.
GrB_Matrix paths = NULL;
REAL_t *paths_dense = NULL;
// Update matrix for betweenness centrality, values for each node for
// each starting node. Treated as dense for efficiency.
REAL_t *bc_update_dense = NULL;
GrB_Matrix t1 = NULL;
GrB_Matrix t2 = NULL;
GrB_Index n; // Number of nodes in the graph
(*centrality) = NULL;
//--------------------------------------------------------------------------
double tic [2];
LAGraph_tic (tic);
GxB_Format_Value a_fmt, at_fmt ;
LAGRAPH_OK (GxB_get (A_matrix, GxB_FORMAT, &a_fmt )) ;
LAGRAPH_OK (GxB_get (AT_matrix, GxB_FORMAT, &at_fmt)) ;
if (a_fmt != GxB_BY_ROW || at_fmt != GxB_BY_ROW)
{
LAGRAPH_ERROR ("A and AT must be stored by row", GrB_INVALID_VALUE) ;
}
int nthreads ;
GxB_get (GxB_NTHREADS, &nthreads) ;
GrB_Index* Sp = NULL;
GrB_Index* Si = NULL;
REAL_t *Sx = NULL;
GrB_Index* Tp = NULL;
GrB_Index* Ti = NULL;
REAL_t *Tx = NULL;
GrB_Index num_rows, num_cols, nnz, anz ;
GrB_Type type ;
LAGr_Matrix_nrows (&n, A_matrix) ; // # of nodes
LAGr_Matrix_nvals (&anz, A_matrix) ; // # of edges
double d = ((double) anz) / ((double) n) ; // average degree
// descriptor for "pull" method: LAGraph_desc_oocr + dot
GrB_Descriptor_new (&pull_descriptor) ;
GrB_Descriptor_set (pull_descriptor, GrB_MASK, GrB_SCMP) ;
GrB_Descriptor_set (pull_descriptor, GrB_OUTP, GrB_REPLACE) ;
GrB_Descriptor_set (pull_descriptor, GxB_AxB_METHOD, GxB_AxB_DOT) ;
// Initialize paths to source vertices with ones
// paths[s[i],i]=1 for i=[0, ..., num_sources)
if (sources == GrB_ALL)
{
num_sources = n; // TODO delete this option
}
const GrB_Index nnz_dense = n * num_sources ;
double ns = num_sources ;
LAGr_Matrix_new(&paths, LAGr_REAL_TYPE, n, num_sources);
GxB_set(paths, GxB_FORMAT, GxB_BY_COL);
// make paths dense
LAGr_assign (paths, NULL, NULL, 0, GrB_ALL, n, GrB_ALL, num_sources, NULL) ;
// Force resolution of pending tuples
GrB_Index ignore;
GrB_Matrix_nvals(&ignore, paths);
if (sources == GrB_ALL)
{
// TODO: remove this option
for (GrB_Index i = 0; i < num_sources; ++i)
{
// paths [i,i] = 1
LAGr_Matrix_setElement(paths, (REAL_t) 1, i, i);
}
}
else
{
for (GrB_Index i = 0; i < num_sources; ++i)
{
// paths [s[i],i] = 1
LAGr_Matrix_setElement(paths, (REAL_t) 1, sources[i], i);
}
}
// Create frontier matrix and initialize to outgoing nodes from
// all source nodes
LAGr_Matrix_new(&frontier, LAGr_REAL_TYPE, n, num_sources);
GxB_set(frontier, GxB_FORMAT, GxB_BY_COL);
// AT = A'
// frontier <!paths> = AT (:,sources)
// TODO: use mxm, so A_matrix values are ignored.
LAGr_extract(frontier, paths, GrB_NULL, A_matrix, GrB_ALL, n, sources,
num_sources, LAGraph_desc_tocr);
// Allocate memory for the array of S matrices
S_array = (GrB_Matrix*) LAGraph_calloc (n, sizeof(GrB_Matrix));
if (S_array == NULL)
{
// out of memory
LAGRAPH_FREE_ALL;
return (GrB_OUT_OF_MEMORY);
}
//--------------------------------------------------------------------------
// Breadth-first search stage
//--------------------------------------------------------------------------
GrB_Index frontier_size = 0 ; // size of current frontier
LAGr_Matrix_nvals (&frontier_size, frontier) ;
GrB_Index seen = 0 ; // total # of nodes seen * (# sources)
double time_1 = LAGraph_toc (tic) ;
// printf (" init: %g\n", time_1) ;
double phase1_other_time = 0 ;
double phase1_allpush_time = 0 ;
double phase1_allpull_time = 0 ;
double phase1_pushpull_time = 0 ;
int nth = LAGRAPH_MIN (nthreads, num_sources) ;
int64_t depth = 0; // Initial BFS depth
do
{
LAGraph_tic (tic);
// printf ("depth: %g\n", (double) depth) ;
// Create the current search matrix - one column for each source/BFS
LAGr_Matrix_new (&(S_array[depth]), GrB_BOOL, n, num_sources) ;
GxB_set (S_array[depth], GxB_FORMAT, GxB_BY_COL) ;
// Copy the current frontier to S
LAGr_apply (S_array[depth], GrB_NULL, GrB_NULL, GrB_IDENTITY_BOOL,
frontier, GrB_NULL) ;
//=== Accumulate path counts: paths += frontier ========================
// Export paths
int64_t paths_nonempty ;
GxB_Matrix_export_CSC(&paths, &type, &num_rows, &num_cols, &nnz,
&paths_nonempty, &Sp, &Si, (void **) &Sx, GrB_NULL);
// Export frontier
int64_t frontier_nonempty ;
GxB_Matrix_export_CSC(&frontier, &type, &num_rows, &num_cols, &nnz,
&frontier_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL);
// Use frontier pattern to update dense paths
#pragma omp parallel for num_threads(nth)
for (int64_t col = 0; col < num_sources; col++)
{
for (GrB_Index p = Tp[col]; p < Tp[col+1]; p++)
{
GrB_Index row = Ti[p];
Sx [col * n + row] += Tx [p];
}
}
// Import frontier
GxB_Matrix_import_CSC(&frontier, LAGr_REAL_TYPE, n, num_sources, nnz,
frontier_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL);
// Import paths
GxB_Matrix_import_CSC(&paths, LAGr_REAL_TYPE, n, num_sources,
nnz_dense, paths_nonempty, &Sp, &Si, (void **) &Sx, GrB_NULL);
phase1_other_time += LAGraph_toc (tic) ;
//=== Update frontier: frontier<!paths> = A' +.* frontier ==============
seen += frontier_size ; // # nonzeros in paths array
/*
double u = (n * ns - seen) ; // # zeros in paths array
double f = frontier_size / ns ;
double push_work_estimate = d * frontier_size ;
double pull_work_estimate = u * fmin (d + f, d * log2 (f)) ;
printf ("\n1: d: %g f: %g u: %g ns: %g "
"push: %g pull: %g pull/push %g\n", d, f, u, ns,
push_work_estimate, pull_work_estimate,
pull_work_estimate / push_work_estimate) ;
*/
double pull_time = INFINITY ;
#if DO_PULL
GrB_Matrix frontier2 = NULL ;
GrB_Matrix_dup (&frontier2, frontier) ;
// uses the "pull" method (dot), because AT_matrix is stored by
// row, and frontier is stored by column.
LAGraph_tic (tic);
LAGr_mxm(frontier2, paths, GrB_NULL, LAGr_PLUS_SECOND_REAL,
AT_matrix, frontier2, pull_descriptor) ;
pull_time = LAGraph_toc (tic) ;
// printf ("1: pull_time: %g sec\n", pull_time) ;
GrB_free (&frontier2) ;
#endif
phase1_allpull_time += pull_time ;
// uses the "push" method (saxpy)
LAGraph_tic (tic);
LAGr_mxm(frontier, paths, GrB_NULL, LAGr_PLUS_SECOND_REAL,
A_matrix, frontier, LAGraph_desc_tocr);
double push_time = LAGraph_toc (tic) ;
// printf ("1: push_time: %g sec, pull/push %g\n",
// push_time, pull_time/push_time) ;
phase1_allpush_time += push_time ;
// assume a perfect pushpull heuristic
double pushpull_time = fmin (pull_time, push_time) ;
phase1_pushpull_time += pushpull_time ;
//=== Find the new frontier size =======================================
LAGraph_tic (tic);
LAGr_Matrix_nvals (&frontier_size, frontier) ;
depth = depth + 1;
phase1_other_time += LAGraph_toc (tic) ;
} while (frontier_size > 0) ; // Repeat until the frontier is empty
// printf (" 1st mxm allpush: %g\n", phase1_allpush_time) ;
#if DO_PULL
printf (" 1st mxm allpull: %g\n", phase1_allpull_time) ;
printf (" 1st mxm pushpull: %g\n", phase1_pushpull_time) ;
#endif
// printf (" 1st other: %g\n", phase1_other_time) ;
LAGraph_tic (tic);
//--------------------------------------------------------------------------
// Betweenness centrality computation phase
//--------------------------------------------------------------------------
// Create the dense update matrix and initialize it to 1
// We will store it column-wise (col * p + row)
bc_update_dense = LAGraph_malloc(nnz_dense, sizeof(REAL_t));
#pragma omp parallel for num_threads(nthreads)
for (GrB_Index nz = 0; nz < nnz_dense; nz++)
{
bc_update_dense[nz] = 1.0;
}
// By this point, paths is (mostly) dense.
// Create a dense version of the GraphBLAS paths matrix
int64_t paths_nonempty ;
GxB_Matrix_export_CSC(&paths, &type, &num_rows, &num_cols, &nnz,
&paths_nonempty, &Sp, &Si, (void **) &paths_dense, GrB_NULL);
// Throw away the "sparse" version of paths
LAGraph_free(Sp);
LAGraph_free(Si);
// Create temporary workspace matrix
LAGr_Matrix_new(&t2, LAGr_REAL_TYPE, n, num_sources);
GxB_set(t2, GxB_FORMAT, GxB_BY_COL);
double time_3 = LAGraph_toc (tic) ;
double phase2_other_time = 0 ;
double phase2_allpush_time = 0 ;
double phase2_allpull_time = 0 ;
double phase2_pushpull_time = 0 ;
// Backtrack through the BFS and compute centrality updates for each vertex
for (int64_t i = depth - 1; i > 0; i--)
{
// Add contributions by successors and mask with that BFS level's
// frontier
LAGraph_tic (tic);
// printf ("back: %g\n", (double) i) ;
/*
GrB_Index prior_size ;
GrB_Matrix_nvals (&prior_size, S_array [i-1]) ;
GrB_Matrix_nvals (&frontier_size, S_array [i]) ;
double u = prior_size ; // # entries in the mask
double f = frontier_size / ns ;
double push_work_estimate = d * frontier_size ;
double pull_work_estimate = u * fmin (d + f, d * log2 (f)) ;
printf ("\n2: d: %g f: %g u: %g ns: %g "
"push: %g pull: %g pull/push %g\n", d, f, u, ns,
push_work_estimate, pull_work_estimate,
pull_work_estimate / push_work_estimate) ;
*/
//=== temp<S_array[i]> = bc_update ./ paths ============================
// Export the pattern of S_array[i]
void *Bx ;
int64_t S_nonempty ;
GxB_Matrix_export_CSC(&(S_array[i]), &type, &num_rows, &num_cols, &nnz,
&S_nonempty, &Sp, &Si, &Bx, GrB_NULL);
// Compute Tx = bc_update ./ paths_dense for all elements of S_array
// Build the Tp and Ti vectors, too.
Tp = LAGraph_malloc(num_sources+1, sizeof(GrB_Index));
Ti = LAGraph_malloc(nnz, sizeof(GrB_Index));
Tx = LAGraph_malloc(nnz, sizeof(REAL_t));
#pragma omp parallel for num_threads(nthreads)
for (int64_t col = 0; col < num_sources; col++)
{
Tp[col] = Sp[col];
for (GrB_Index p = Sp[col]; p < Sp[col+1]; p++)
{
// Compute Tx by eWiseMult of dense matrices
GrB_Index row = Ti[p] = Si[p];
Tx [p] = bc_update_dense [col * n + row]
/ paths_dense [col * n + row] ;
}
}
Tp[num_sources] = Sp[num_sources];
// Restore S_array[i] by importing it
GxB_Matrix_import_CSC(&(S_array[i]), GrB_BOOL, num_rows, num_cols,
nnz, S_nonempty, &Sp, &Si, &Bx, GrB_NULL);
// Create a GraphBLAS matrix t1 from Tp, Ti, Tx
// The row/column indices are the pattern r/c from S_array[i]
GxB_Matrix_import_CSC(&t1, LAGr_REAL_TYPE, n, num_sources, nnz,
S_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL);
phase2_other_time += LAGraph_toc (tic) ;
//=== t2<S_array[i-1]> = (A * t1) ======================================
double pull_time = INFINITY ;
#if DO_PULL
// uses the "pull" method (dot)
LAGraph_tic (tic);
GrB_free (&t2) ;
LAGr_Matrix_new(&t2, LAGr_REAL_TYPE, n, num_sources);
GxB_set(t2, GxB_FORMAT, GxB_BY_COL);
LAGr_mxm(t2, S_array[i-1], GrB_NULL, LAGr_PLUS_SECOND_REAL,
A_matrix, t1, LAGraph_desc_ooor);
pull_time = LAGraph_toc (tic) ;
printf ("2: pull_time: %g sec\n", pull_time) ;
#endif
phase2_allpull_time += pull_time ;
// uses the "push" method (saxpy)
LAGraph_tic (tic);
GrB_free (&t2) ;
LAGr_Matrix_new(&t2, LAGr_REAL_TYPE, n, num_sources);
GxB_set(t2, GxB_FORMAT, GxB_BY_COL);
LAGr_mxm(t2, S_array[i-1], GrB_NULL, LAGr_PLUS_SECOND_REAL,
AT_matrix, t1, LAGraph_desc_toor);
double push_time = LAGraph_toc (tic) ;
// printf ("2: push_time: %g sec, pull/push %g\n", push_time,
// pull_time/push_time) ;
phase2_allpush_time += push_time ;
// assume a perfect pushpull heuristic
double pushpull_time = fmin (pull_time, push_time) ;
phase2_pushpull_time += pushpull_time ;
LAGraph_tic (tic);
GrB_free(&t1);
//=== bc_update += t2 .* paths =========================================
int64_t t2_nonempty ;
GxB_Matrix_export_CSC(&t2, &type, &num_rows, &num_cols, &nnz,
&t2_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL);
#pragma omp parallel for num_threads(nth)
for (int64_t col = 0; col < num_sources; col++)
{
for (GrB_Index p = Tp[col]; p < Tp[col+1]; p++)
{
GrB_Index row = Ti[p];
bc_update_dense [col * n + row] +=
Tx [p] * paths_dense [col * n + row] ;
}
}
// Re-import t2
GxB_Matrix_import_CSC(&t2, LAGr_REAL_TYPE, num_rows, num_cols, nnz,
t2_nonempty, &Tp, &Ti, (void **) &Tx, GrB_NULL);
phase2_other_time += LAGraph_toc (tic) ;
}
// printf (" 2nd mxm allpush: %g\n", phase2_allpush_time) ;
#if DO_PULL
printf (" 2nd mxm allpull: %g\n", phase2_allpull_time) ;
printf (" 2nd mxm pushpull: %g\n", phase2_pushpull_time) ;
#endif
// printf (" 2nd other: %g\n", phase2_other_time + time_3) ;
LAGraph_tic (tic);
//--------------------------------------------------------------------------
// finalize centrality scores
//--------------------------------------------------------------------------
//=== Initialize the centrality array with -(num_sources) to avoid counting
// zero length paths ====================================================
REAL_t *centrality_dense = LAGraph_malloc(n, sizeof(REAL_t));
#pragma omp parallel for num_threads(nthreads)
for (GrB_Index i = 0; i < n; i++)
{
centrality_dense[i] = -num_sources;
}
//=== centrality[i] += bc_update[i,:] ======================================
// Both are dense. We can also take care of the reduction.
#pragma omp parallel for schedule(static) num_threads(nthreads)
for (GrB_Index j = 0; j < n; j++)
{
for (int64_t i = 0; i < num_sources; i++)
{
centrality_dense[j] += bc_update_dense[n * i + j];
}
}
// Build the index vector.
GrB_Index* I = LAGraph_malloc(n, sizeof(GrB_Index));
#pragma omp parallel for num_threads(nthreads)
for (GrB_Index j = 0; j < n; j++)
{
I[j] = j;
}
// Import the dense vector into GraphBLAS and return it.
GxB_Vector_import(centrality, LAGr_REAL_TYPE, n, n, &I,
(void **) &centrality_dense, GrB_NULL);
LAGRAPH_FREE_WORK;
double time_5 = LAGraph_toc (tic) ;
// printf (" wrapup: %g\n", time_5) ;
timing [0] = time_1
+ (phase1_pushpull_time + phase1_other_time)
+ time_3 + (phase2_pushpull_time + phase2_other_time) + time_5 ;
timing [1] = time_1
+ (phase1_allpush_time + phase1_other_time)
+ time_3 + (phase2_allpush_time + phase2_other_time) + time_5 ;
timing [2] = time_1
+ (phase1_allpull_time + phase1_other_time)
+ time_3 + (phase2_allpull_time + phase2_other_time) + time_5 ;
#if DO_PULL
printf ("Xbc total (pushpull): %g\n", timing [0]) ;
#endif
// printf ("Xbc total (allpush): %g\n", timing [1]) ;
#if DO_PULL
printf ("Xbc total (allpull): %g\n", timing [2]) ;
#endif
return GrB_SUCCESS;
}
util.h
/*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2018 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file internal/util.h
* \brief Internal utility functions
*/
#ifndef INTERNAL_UTIL_H_
#define INTERNAL_UTIL_H_
namespace qpp {
/**
* \namespace qpp::internal
* \brief Internal utility functions, do not use them directly or modify them
*/
namespace internal {
// integer index to multi-index, use C-style array for speed
// standard lexicographical order, e.g. 00, 01, 10, 11
inline void n2multiidx(idx n, idx numdims, const idx* const dims,
idx* result) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
if (numdims > 0) // numdims equal zero is a no-op
{
idx D = 1;
for (idx i = 0; i < numdims; ++i)
D *= dims[i];
assert(n < D);
}
#endif
// no error checks in release version to improve speed
for (idx i = 0; i < numdims; ++i) {
result[numdims - i - 1] = n % (dims[numdims - i - 1]);
n /= (dims[numdims - i - 1]);
}
}
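// A small worked example (illustration only): with dims = {2, 3} the linear
// indices 0..5 enumerate 00, 01, 02, 10, 11, 12 in lexicographical order, so
//
//      idx dims[2] = {2, 3}, result[2];
//      n2multiidx(5, 2, dims, result);   // result == {1, 2}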
// silence g++4.9 bogus warning -Warray-bounds and -Wmaybe-uninitialized
// in qpp::internal::multiidx2n()
#if (__GNUC__ && !__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
// multi-index to integer index, use C-style array for speed,
// standard lexicographical order, e.g. 00->0, 01->1, 10->2, 11->3
inline idx multiidx2n(const idx* const midx, idx numdims,
const idx* const dims) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(numdims > 0);
#endif
// no error checks in release version to improve speed
// Static allocation for speed!
// double the size for matrices reshaped as vectors
idx part_prod[2 * maxn];
idx result = 0;
part_prod[numdims - 1] = 1;
for (idx i = 1; i < numdims; ++i) {
part_prod[numdims - i - 1] = part_prod[numdims - i] * dims[numdims - i];
result += midx[numdims - i - 1] * part_prod[numdims - i - 1];
}
return result + midx[numdims - 1];
}
#if (__GNUC__ && !__clang__)
#pragma GCC diagnostic pop
#endif
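// The inverse of the n2multiidx example above: multiidx2n computes
// 1 * 3 + 2 = 5 for the multi-index {1, 2} with dims = {2, 3}:
//
//      idx midx[2] = {1, 2}, dims[2] = {2, 3};
//      idx n = multiidx2n(midx, 2, dims);   // n == 5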
// check square matrix
template <typename Derived>
bool check_square_mat(const Eigen::MatrixBase<Derived>& A) {
return A.rows() == A.cols();
}
// check whether input is a vector or not
template <typename Derived>
bool check_vector(const Eigen::MatrixBase<Derived>& A) {
return A.rows() == 1 || A.cols() == 1;
}
// check whether input is a row vector or not
template <typename Derived>
bool check_rvector(const Eigen::MatrixBase<Derived>& A) {
return A.rows() == 1;
}
// check whether input is a column vector or not
template <typename Derived>
bool check_cvector(const Eigen::MatrixBase<Derived>& A) {
return A.cols() == 1;
}
// check non-zero size of object that supports size() function
template <typename T>
bool check_nonzero_size(const T& x) noexcept {
return x.size() != 0;
}
// check that all sizes match
template <typename T1, typename T2>
bool check_matching_sizes(const T1& lhs, const T2& rhs) noexcept {
return lhs.size() == rhs.size();
}
// check that dims is a valid dimension vector
inline bool check_dims(const std::vector<idx>& dims) {
if (dims.size() == 0)
return false;
return std::find_if(std::begin(dims), std::end(dims),
[](idx i) -> bool { return i == 0; }) == std::end(dims);
}
// check that valid dims match the dimensions
// of valid (non-zero sized) square matrix
template <typename Derived>
bool check_dims_match_mat(const std::vector<idx>& dims,
const Eigen::MatrixBase<Derived>& A) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
assert(A.rows() == A.cols());
#endif
idx proddim = std::accumulate(std::begin(dims), std::end(dims),
static_cast<idx>(1), std::multiplies<idx>());
return proddim == static_cast<idx>(A.rows());
}
// check that valid dims match the dimensions of valid column vector
template <typename Derived>
bool check_dims_match_cvect(const std::vector<idx>& dims,
const Eigen::MatrixBase<Derived>& A) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
assert(A.rows() > 0);
assert(A.cols() == 1);
#endif
idx proddim = std::accumulate(std::begin(dims), std::end(dims),
static_cast<idx>(1), std::multiplies<idx>());
return proddim == static_cast<idx>(A.rows());
}
// check that valid dims match the dimensions of valid row vector
template <typename Derived>
bool check_dims_match_rvect(const std::vector<idx>& dims,
const Eigen::MatrixBase<Derived>& A) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
assert(A.cols() > 0);
assert(A.rows() == 1);
#endif
idx proddim = std::accumulate(std::begin(dims), std::end(dims),
static_cast<idx>(1), std::multiplies<idx>());
return proddim == static_cast<idx>(A.cols());
}
// check that all elements in valid dims equal to dim
inline bool check_eq_dims(const std::vector<idx>& dims, idx dim) noexcept {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(dims.size() > 0);
#endif
for (idx i : dims)
if (i != dim)
return false;
return true;
}
// check that subsys is valid with respect to valid dims
inline bool check_subsys_match_dims(const std::vector<idx>& subsys,
const std::vector<idx>& dims) {
// subsys can be empty
// check valid number of subsystems
if (subsys.size() > dims.size())
return false;
// sort the subsystems
std::vector<idx> subsyssort = subsys;
std::sort(std::begin(subsyssort), std::end(subsyssort));
// check duplicates
if (std::unique(std::begin(subsyssort), std::end(subsyssort)) !=
std::end(subsyssort))
return false;
// check range of subsystems
return std::find_if(std::begin(subsyssort), std::end(subsyssort),
[dims](idx i) -> bool {
return i > dims.size() - 1;
}) == std::end(subsyssort);
}
// check matrix is 2 x 2
template <typename Derived>
bool check_qubit_matrix(const Eigen::MatrixBase<Derived>& A) noexcept {
return A.rows() == 2 && A.cols() == 2;
}
// check column vector is 2 x 1
template <typename Derived>
bool check_qubit_cvector(const Eigen::MatrixBase<Derived>& A) noexcept {
return A.rows() == 2 && A.cols() == 1;
}
// check row vector is 1 x 2
template <typename Derived>
bool check_qubit_rvector(const Eigen::MatrixBase<Derived>& A) noexcept {
return A.rows() == 1 && A.cols() == 2;
}
// check row vector is 1 x 2 or 2 x 1
template <typename Derived>
bool check_qubit_vector(const Eigen::MatrixBase<Derived>& A) noexcept {
return (A.rows() == 1 && A.cols() == 2) || (A.rows() == 2 && A.cols() == 1);
}
// check valid permutation
inline bool check_perm(const std::vector<idx>& perm) {
if (perm.size() == 0)
return false;
std::vector<idx> ordered(perm.size());
std::iota(std::begin(ordered), std::end(ordered), 0);
return std::is_permutation(std::begin(ordered), std::end(ordered),
std::begin(perm));
}
// Kronecker product of 2 matrices, preserve return type
// internal function for the variadic template function wrapper kron()
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar> kron2(const Eigen::MatrixBase<Derived1>& A,
const Eigen::MatrixBase<Derived2>& B) {
const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
const dyn_mat<typename Derived2::Scalar>& rB = B.derived();
// EXCEPTION CHECKS
// check types
if (!std::is_same<typename Derived1::Scalar,
typename Derived2::Scalar>::value)
throw exception::TypeMismatch("qpp::kron()");
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::kron()");
// check zero-size
if (!internal::check_nonzero_size(rB))
throw exception::ZeroSize("qpp::kron()");
// END EXCEPTION CHECKS
idx Acols = static_cast<idx>(rA.cols());
idx Arows = static_cast<idx>(rA.rows());
idx Bcols = static_cast<idx>(rB.cols());
idx Brows = static_cast<idx>(rB.rows());
dyn_mat<typename Derived1::Scalar> result;
result.resize(Arows * Brows, Acols * Bcols);
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
// column major order for speed
for (idx j = 0; j < Acols; ++j)
for (idx i = 0; i < Arows; ++i)
result.block(i * Brows, j * Bcols, Brows, Bcols) = rA(i, j) * rB;
return result;
}
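// Illustrative example (not from the original source): kron2(X, I) with
// X = [[0, 1], [1, 0]] and I the 2 x 2 identity yields the 4 x 4 block
// matrix [[0, I], [I, 0]], i.e., the X gate acting on the first qubit of a
// two-qubit system.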
// Direct sum of 2 matrices, preserve return type
// internal function for the variadic template function wrapper dirsum()
template <typename Derived1, typename Derived2>
dyn_mat<typename Derived1::Scalar>
dirsum2(const Eigen::MatrixBase<Derived1>& A,
const Eigen::MatrixBase<Derived2>& B) {
const dyn_mat<typename Derived1::Scalar>& rA = A.derived();
const dyn_mat<typename Derived2::Scalar>& rB = B.derived();
// EXCEPTION CHECKS
// check types
if (!std::is_same<typename Derived1::Scalar,
typename Derived2::Scalar>::value)
throw exception::TypeMismatch("qpp::dirsum()");
// check zero-size
if (!internal::check_nonzero_size(rA))
throw exception::ZeroSize("qpp::dirsum()");
// check zero-size
if (!internal::check_nonzero_size(rB))
throw exception::ZeroSize("qpp::dirsum()");
// END EXCEPTION CHECKS
idx Acols = static_cast<idx>(rA.cols());
idx Arows = static_cast<idx>(rA.rows());
idx Bcols = static_cast<idx>(rB.cols());
idx Brows = static_cast<idx>(rB.rows());
dyn_mat<typename Derived1::Scalar> result =
dyn_mat<typename Derived1::Scalar>::Zero(Arows + Brows, Acols + Bcols);
result.block(0, 0, Arows, Acols) = rA;
result.block(Arows, Acols, Brows, Bcols) = rB;
return result;
}
// may be useful, extracts variadic template argument pack into a std::vector
template <typename T>
// ends the recursion
void variadic_vector_emplace(std::vector<T>&) {}
// may be useful, extracts variadic template argument pack into a std::vector
template <typename T, typename First, typename... Args>
void variadic_vector_emplace(std::vector<T>& v, First&& first, Args&&... args) {
v.emplace_back(std::forward<First>(first));
variadic_vector_emplace(v, std::forward<Args>(args)...);
}
// returns the number of subsystems (each subsystem assumed of the same
// dimension d) from an object (ket/bra/density matrix) of size sz
inline idx get_num_subsys(idx sz, idx d) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(sz > 0);
assert(d > 1);
#endif
return static_cast<idx>(std::llround(std::log2(sz) / std::log2(d)));
}
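// Example: a 3-qubit ket has size sz = 8 with local dimension d = 2, so
// get_num_subsys(8, 2) returns llround(log2(8) / log2(2)) = 3.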
// returns the dimension of a subsystem (each subsystem assumed of the same
// dimension d) from an object (ket/bra/density matrix) of size sz consisting
// of N subsystems
inline idx get_dim_subsys(idx sz, idx N) {
// error checks only in DEBUG version
#ifndef NDEBUG
assert(N > 0);
assert(sz > 0);
#endif
if (N == 2)
return static_cast<idx>(std::llround(std::sqrt(sz)));
return static_cast<idx>(std::llround(std::pow(sz, 1. / N)));
}
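// Example: an object of size sz = 8 composed of N = 3 equal subsystems has
// get_dim_subsys(8, 3) = llround(8^(1/3)) = 2; the dedicated N == 2 branch
// presumably uses sqrt to reduce floating-point error from pow.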
// implementation details for pretty formatting
struct Display_Impl_ {
template <typename T>
// T must support rows(), cols(), operator()(idx, idx) const
std::ostream& display_impl_(const T& A, std::ostream& os,
double chop = qpp::chop) const {
std::ostringstream ostr;
ostr.copyfmt(os); // copy os' state
std::vector<std::string> vstr;
std::string strA;
for (idx i = 0; i < static_cast<idx>(A.rows()); ++i) {
for (idx j = 0; j < static_cast<idx>(A.cols()); ++j) {
strA.clear(); // clear the temporary string
ostr.clear();
ostr.str(std::string{}); // clear the ostringstream
// convert to complex
double re = static_cast<cplx>(A(i, j)).real();
double im = static_cast<cplx>(A(i, j)).imag();
if (std::abs(re) < chop && std::abs(im) < chop) {
ostr << "0 "; // otherwise segfault on destruction
// if using only vstr.push_back("0 ");
// bug in MATLAB libmx
vstr.push_back(ostr.str());
} else if (std::abs(re) < chop) {
ostr << im;
vstr.push_back(ostr.str() + "i");
} else if (std::abs(im) < chop) {
ostr << re;
vstr.push_back(ostr.str() + " ");
} else {
ostr << re;
strA = ostr.str();
strA += (im > 0 ? " + " : " - ");
ostr.clear();
ostr.str(std::string()); // clear
ostr << std::abs(im);
strA += ostr.str();
strA += "i";
vstr.push_back(strA);
}
}
}
// determine the maximum length of the entries in each column
std::vector<idx> maxlengthcols(A.cols(), 0);
for (idx i = 0; i < static_cast<idx>(A.rows()); ++i)
for (idx j = 0; j < static_cast<idx>(A.cols()); ++j)
if (vstr[i * A.cols() + j].size() > maxlengthcols[j])
maxlengthcols[j] = vstr[i * A.cols() + j].size();
// finally display it!
for (idx i = 0; i < static_cast<idx>(A.rows()); ++i) {
os << std::setw(static_cast<int>(maxlengthcols[0])) << std::right
<< vstr[i * A.cols()]; // display first column
// then the rest
for (idx j = 1; j < static_cast<idx>(A.cols()); ++j)
os << std::setw(static_cast<int>(maxlengthcols[j] + 2))
<< std::right << vstr[i * A.cols() + j];
if (i < static_cast<idx>(A.rows()) - 1)
os << std::endl;
}
return os;
}
};
} /* namespace internal */
} /* namespace qpp */
#endif /* INTERNAL_UTIL_H_ */
GB_binop__plus_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_uint8)
// A*D function (colscale): GB (_AxD__plus_uint8)
// D*A function (rowscale): GB (_DxB__plus_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_uint8)
// C=scalar+B GB (_bind1st__plus_uint8)
// C=scalar+B' GB (_bind1st_tran__plus_uint8)
// C=A+scalar GB (_bind2nd__plus_uint8)
// C=A'+scalar GB (_bind2nd_tran__plus_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij + bij)
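// A usage sketch (an assumption about the dispatch path, not stated in this
// file): these kernels are reached from user-level GraphBLAS calls such as
//
//      // C = A + B with uint8_t matrices, landing in GB (_AaddB__plus_uint8)
//      GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_PLUS_UINT8,
//          A, B, NULL) ;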
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT8 || GxB_NO_PLUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__plus_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__plus_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
GrB_Info GB (_bind1st_tran__plus_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
GrB_Info GB (_bind2nd_tran__plus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
convolution_omp.c
#include "convolution.h"
void f(int x)
{
printf("%d\n%", x);
return;
}
// Divides each pixel value in *img by normalize_amount to normalize pixel
// values. Integer division is OK.
void normalize_output(image *img, int normalize_amount)
{
for (int r = 0; r < img->height; r++)
{
for (int c = 0; c < img->width; c++)
{
set_pixel(img, r, c,
img->pixels[r * img->width + c] / normalize_amount);
// get_pixel(img, r, c) / normalize_amount);
}
}
return;
}
image *extend_edges(image *img, int extend_amount)
{
int new_height, new_width;
new_height = img->height + 2 * extend_amount;
new_width = img->width + 2 * extend_amount;
// image *img_ext = copy_image(img);
// img_ext->height = new_height;
// img_ext->width = new_width;
image *img_ext = create_image(0x0, new_height, new_width);
int px;
// copy original
for (int r = 0; r < img->height; r++)
{
for (int c = 0; c < img->width; c++)
{
px = img->pixels[r * img->width + c];
// get_pixel(img, r, c);
set_pixel(img_ext, r + extend_amount, c + extend_amount, px);
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// fill top left
// px = get_pixel(img, 0, 0);
px = img->pixels[0 * img->width + 0];
for (int r = 0; r < extend_amount + 1; r++)
{
for (int c = 0; c < extend_amount + 1; c++)
{
set_pixel(img_ext, r, c, px);
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// fill top right
// px = get_pixel(img, 0, img->width - 1);
px = img->pixels[0 * img->width + img->width - 1];
for (int r = 0; r < extend_amount + 1; r++)
{
for (int c = img->width + extend_amount - 1; c < new_width; c++)
{
set_pixel(img_ext, r, c, px);
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// fill bottom left
// px = get_pixel(img, img->height - 1, 0);
px = img->pixels[(img->height - 1) * img->width + 0];
for (int r = img->height + extend_amount - 1; r < new_height; r++)
{
for (int c = 0; c < extend_amount + 1; c++)
{
set_pixel(img_ext, r, c, px);
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// fill bottom right
// px = get_pixel(img, img->height - 1, img->width - 1);
px = img->pixels[(img->height - 1) * img->width + (img->width - 1)];
for (int r = img->height + extend_amount - 1; r < new_height; r++)
{
for (int c = img->width + extend_amount - 1; c < new_width; c++)
{
set_pixel(img_ext, r, c, px);
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// top
for (int r = extend_amount - 1; r >= 0; r--)
{
for (int c = extend_amount + 1; c <= new_width - extend_amount - 2; c++)
{
set_pixel(img_ext, r, c,
img_ext->pixels[(r + 1) * new_width + (c)]);
// get_pixel(img_ext, r + 1, c));
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// bottom
for (int r = new_height - extend_amount; r < new_height; r++)
{
for (int c = extend_amount + 1; c <= new_width - extend_amount - 2; c++)
{
set_pixel(img_ext, r, c,
img_ext->pixels[(r - 1) * new_width + (c)]);
// get_pixel(img_ext, r - 1, c));
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// right
for (int r = extend_amount + 1; r <= new_height - extend_amount - 2; r++)
{
for (int c = new_width - extend_amount; c < new_width; c++)
{
set_pixel(img_ext, r, c,
img_ext->pixels[(r) * new_width + (c - 1)]);
// get_pixel(img_ext, r, c - 1));
}
}
// print_2d_matrix(img_ext, new_height, new_width);
// printf("============================\n\n");
// left
for (int r = extend_amount + 1; r <= new_height - extend_amount - 2; r++)
{
for (int c = extend_amount - 1; c >= 0; c--)
{
set_pixel(img_ext, r, c,
img_ext->pixels[(r) * new_width + (c + 1)]);
// get_pixel(img_ext, r, c + 1));
}
}
delete_image(img);
return img_ext;
}
// Calculates the normalize index by simply summing up all elements in the
// kernel. If sum is 0, return 1. Otherwise, return the sum.
int kernel_sum(image *k)
{
int sum = 0;
for (int r = 0; r < k->height; r++)
for (int c = 0; c < k->width; c++)
{
sum += k->pixels[(r)*k->width + (c)];
// sum += get_pixel(k, r, c);
}
if (sum == 0)
return 1;
else
return sum;
}
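// Example: a 3x3 box-blur kernel of all ones gives kernel_sum == 9, so each
// convolved pixel is later divided by 9; an edge-detection kernel whose
// entries sum to 0 returns 1, leaving the output unscaled.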
// Core multiply-accumulate operation at given location of the image
int pixel_operation(image *k, image *img, int row_index, int col_index)
{
int sum = 0;
int k_offset = k->height / 2;
for (int r = row_index - k_offset,
k_r = 0;
k_r < k->height;
r++, k_r++)
{
for (int c = col_index - k_offset,
k_c = 0;
k_c < k->width;
c++, k_c++)
{
// int img_px = get_pixel(img, r, c);
int img_px = img->pixels[(r)*img->width + (c)];
// int k_px = get_pixel(k, k_r, k_c);
int k_px = k->pixels[(k_r)*k->width + (k_c)];
// printf("\t\t[img@%d,%d]: %d * [k@%d,%d]: %d\n", r, c, img_px, k_r, k_c, k_px);
sum += img_px * k_px;
}
}
return sum;
}
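// Example: with the 3x3 identity kernel {{0,0,0},{0,1,0},{0,0,0}}, only the
// center tap contributes, so pixel_operation(k, img, r, c) reduces to
// img->pixels[r * img->width + c].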
// Convolution method that convolves the image with the given kernel. This
// method is expected to call all the other methods above.
image *convolve_image(image *k, image *img)
{
int kernel_norm = kernel_sum(k);
img = extend_edges(img, k->width / 2);
// print_image(k);
// printf("==========================\n");
// print_image(img);
// printf("==========================\n");
image *new_img = create_image(0x0, img->height, img->width);
#pragma omp parallel for shared(new_img, img, k)
for (int r = k->height; r < img->height - k->height; r++)
{
for (int c = k->width; c < img->width - k->width; c++)
{
int new_px = pixel_operation(k, img, r, c);
set_pixel(new_img, r, c, new_px);
}
// print_image(new_img);
// printf("==========================\n");
}
// normalize the convolved output (not the extended input) by the kernel sum
normalize_output(new_img, kernel_norm);
delete_image(img); // free the extended working copy made by extend_edges
return new_img;
}
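// A hedged usage sketch (helper signatures inferred from the calls above and
// assumed to live in convolution.h):
//
//      image *k = create_image(0x0, 3, 3);   // fill k->pixels with weights
//      image *in = create_image(0x0, h, w);  // fill in->pixels with data
//      image *out = convolve_image(k, in);   // note: 'in' is freed internally
//      /* ... use out ... */
//      delete_image(out);
//      delete_image(k);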
GrB_Vector_nvals.c
//------------------------------------------------------------------------------
// GrB_Vector_nvals: number of entries in a sparse vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GrB_Vector_nvals // get the number of entries in a vector
(
GrB_Index *nvals, // number of entries
const GrB_Vector v // vector to query
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GB_WHERE1 ("GrB_Vector_nvals (&nvals, v)") ;
GB_BURBLE_START ("GrB_Vector_nvals") ;
GB_RETURN_IF_NULL_OR_FAULTY (v) ;
ASSERT (GB_VECTOR_OK (v)) ;
//--------------------------------------------------------------------------
// get the number of entries
//--------------------------------------------------------------------------
GrB_Info info = GB_nvals (nvals, (GrB_Matrix) v, Context) ;
GB_BURBLE_END ;
#pragma omp flush
return (info) ;
}
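// Usage sketch (standard GraphBLAS API):
//
//      GrB_Index nvals ;
//      GrB_Info info = GrB_Vector_nvals (&nvals, v) ;
//      // on success, info == GrB_SUCCESS and nvals holds the entry count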
FasterLookup.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "src/FasterLookup.c"
#else
// add two vectors
static inline void nn_(FasterLookup_addVec)(
real *res, real alpha, real *vec, int dim) {
int i;
int m = dim - 3;
for (i = 0; i < m; i += 4) {
res[i] += alpha * vec[i];
res[i+1] += alpha * vec[i+1];
res[i+2] += alpha * vec[i+2];
res[i+3] += alpha * vec[i+3];
}
for ( ; i < dim; ++i)
res[i] += alpha * vec[i];
}
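// For reference, the unrolled loop above is equivalent to the scalar axpy
//      for (i = 0; i < dim; ++i) res[i] += alpha * vec[i];
// the 4-way unrolling just reduces loop overhead.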
// check if any input index goes outside the allowed indices
static int nn_(FasterLookup_boundError)(
int n_inputs, int max_index, int* input) {
int i; int idx; int err = 0;
for(i=0; i<n_inputs; i++){
idx = *input++;
err = err || idx < 1 || idx > max_index;
}
return err;
}
// accumulate into (grad)weights
static void nn_(FasterLookup_acc)(THTensor *tWeight, real scale,
THIntTensor *tInput, THTensor *tGradOutput, THIntTensor *tCount,
int concUpdates){
// make sure input, gradOutput are contiguous
tInput = THIntTensor_newContiguous(tInput);
tGradOutput = THTensor_(newContiguous)(tGradOutput);
real * weight = THTensor_(data)(tWeight);
int * input = THIntTensor_data(tInput);
real * gradOutput = THTensor_(data)(tGradOutput);
int * count = (tCount) ? (THIntTensor_data(tCount)) : (NULL);
// update
int n_inputs = THIntTensor_nElement(tInput);
int dim = tWeight->size[1];
int i;
int idx;
if (concUpdates) { // with OpenMP: concurrent updates may drop some updates
#pragma omp parallel for private(i, idx)
for(i=0; i<n_inputs; i++){
idx = input[i] - 1;
real s = (count) ? (scale / (real)count[idx]) : scale;
real *w = weight + dim * idx;
nn_(FasterLookup_addVec)(w, s, gradOutput + dim * i, dim);
}
} else { // without OMP
for(i=0; i<n_inputs; i++){
idx = input[i] - 1;
real s = (count) ? (scale / (real)count[idx]) : scale;
real *w = weight + dim * idx;
nn_(FasterLookup_addVec)(w, s, gradOutput + dim * i, dim);
}
}
THIntTensor_free(tInput);
THTensor_(free)(tGradOutput);
}
// count frequency of each index
static void nn_(FasterLookup_incrementCount)(
THIntTensor *tInput, THIntTensor *tCount, int reset) {
// make sure input is contiguous
tInput = THIntTensor_newContiguous(tInput);
int * input = THIntTensor_data(tInput);
int * count = THIntTensor_data(tCount);
int n_inputs = THIntTensor_nElement(tInput);
int i;
count -= 1; // this is Lua: everything starts at 1
int * cur_input = input;
// set to 0 seen indices if necessary
if (reset) { for(i=0; i<n_inputs; i++){ count[*cur_input++] = 0; } }
// count seen indices
cur_input = input;
for(i=0; i<n_inputs; i++){ count[*cur_input++]++; }
THIntTensor_free(tInput);
}
int nn_(FasterLookup_updateOutput)(lua_State *L) {
THIntTensor *tInput = luaT_checkudata(L, 2, "torch.IntTensor");
THTensor * tWeight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
int skipBC = luaT_getfieldcheckboolean(L, 1, "skipBoundChecking");
THTensor * tOutput = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
tInput = THIntTensor_newContiguous(tInput); // make sure input is contiguous
int dim = tWeight->size[1];
if (tInput->nDimension == 1) { // resize output
THTensor_(resize2d)(tOutput, tInput->size[0], dim);
} else if (tInput->nDimension == 2) {
THTensor_(resize3d)(tOutput, tInput->size[0], tInput->size[1], dim);
} else {
luaL_error(L, "input should have 1 or 2 dimensions");
}
int n_inputs = THIntTensor_nElement(tInput);
int *input = THIntTensor_data(tInput); // pointers
real * weight = THTensor_(data)(tWeight);
real * output = THTensor_(data)(tOutput);
if (!skipBC) { // bound checking?
int max_index = tWeight->size[0];
int err = nn_(FasterLookup_boundError)(n_inputs, max_index, input);
if (err) { luaL_error(L, "input contains an index out of bounds"); }
}
int i;
size_t vec_size = dim*sizeof(real);
weight -= dim; // this is Lua: everything starts at 1
#pragma omp parallel for private(i)
for(i=0; i<n_inputs; i++){
memcpy(output + i*dim, weight + input[i]*dim, vec_size);
}
THIntTensor_free(tInput);
return 1;
}
int nn_(FasterLookup_updateParameters)(lua_State *L){
real lr = (real)luaL_checknumber(L, 2);
THTensor * tWeight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
THTensor * tGradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
THIntTensor * tCount = luaT_getfieldcheckudata(L, 1, "count", "torch.IntTensor");
int scaleGradByFreq = luaT_getfieldcheckboolean(L, 1, "scaleGradByFreq");
real * weight = THTensor_(data)(tWeight);
real * gradWeight = THTensor_(data)(tGradWeight);
int * count = THIntTensor_data(tCount);
int i;
int c;
int n_indexes = tWeight->size[0];
int dim = tWeight->size[1];
#pragma omp parallel for private(i, c)
for(i=0; i < n_indexes; i++){
c = count[i];
if (c > 0) { // each non-zero count needs an update
real scale = (scaleGradByFreq) ? (lr / ((real)c)) : (lr);
real *w = weight + dim * i;
real *gw = gradWeight + dim * i;
nn_(FasterLookup_addVec)(w, -scale, gw, dim);
}
}
return 0;
}
int nn_(FasterLookup_accGradParameters)(lua_State *L){
THIntTensor * tInput = luaT_checkudata(L, 2, "torch.IntTensor");
THTensor * tGradOutput = luaT_checkudata(L, 3, torch_Tensor);
real scale = (real)luaL_checknumber(L, 4);
THTensor * tGradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
// increment count
THIntTensor * tCount = luaT_getfieldcheckudata(L, 1, "count", "torch.IntTensor");
nn_(FasterLookup_incrementCount)(tInput, tCount, 0);
// increment grad weight
int concUpdates = luaT_getfieldcheckboolean(L, 1, "concUpdates");
nn_(FasterLookup_acc)(tGradWeight, scale, tInput, tGradOutput, NULL, concUpdates);
return 0;
}
int nn_(FasterLookup_accUpdateGradParameters)(lua_State *L){
THIntTensor * tInput = luaT_checkudata(L, 2, "torch.IntTensor");
THTensor * tGradOutput = luaT_checkudata(L, 3, torch_Tensor);
real lr = (real)luaL_checknumber(L, 4);
THTensor * tWeight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
// reset and increment count
THIntTensor * tCount = NULL;
int scaleGradByFreq = luaT_getfieldcheckboolean(L, 1, "scaleGradByFreq");
if (scaleGradByFreq) {
tCount = luaT_getfieldcheckudata(L, 1, "count", "torch.IntTensor");
nn_(FasterLookup_incrementCount)(tInput, tCount, 1);
}
// increment weight
int concUpdates = luaT_getfieldcheckboolean(L, 1, "concUpdates");
nn_(FasterLookup_acc)(tWeight, -lr, tInput, tGradOutput, tCount, concUpdates);
return 0;
}
static const struct luaL_Reg nn_(FasterLookup__) [] = {
{"FasterLookup_updateOutput", nn_(FasterLookup_updateOutput)},
{"FasterLookup_updateParameters", nn_(FasterLookup_updateParameters)},
{"FasterLookup_accGradParameters", nn_(FasterLookup_accGradParameters)},
{"FasterLookup_accUpdateGradParameters",nn_(FasterLookup_accUpdateGradParameters)},
{NULL, NULL}
};
void nn_(FasterLookup_init)(lua_State *L)
{
luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(FasterLookup__), "nn");
lua_pop(L,1);
}
#endif
|
nr_numint.c | /* Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <assert.h>
#include "config.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#define BOXSIZE 56
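/* Flag each BOXSIZE-wide block of AO indices as empty when every shell that
 * maps into it is zero in non0table; returns nonzero if at least one block
 * can be skipped by the blocked dgemm drivers below. */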
int VXCao_empty_blocks(int8_t *empty, uint8_t *non0table, int *shls_slice,
int *ao_loc)
{
if (non0table == NULL || shls_slice == NULL || ao_loc == NULL) {
return 0;
}
const int sh0 = shls_slice[0];
const int sh1 = shls_slice[1];
int bas_id;
int box_id = 0;
int bound = BOXSIZE;
int has0 = 0;
empty[box_id] = 1;
for (bas_id = sh0; bas_id < sh1; bas_id++) {
empty[box_id] &= !non0table[bas_id];
if (ao_loc[bas_id] == bound) {
has0 |= empty[box_id];
box_id++;
bound += BOXSIZE;
empty[box_id] = 1;
} else if (ao_loc[bas_id] > bound) {
has0 |= empty[box_id];
box_id++;
bound += BOXSIZE;
empty[box_id] = !non0table[bas_id];
}
}
return has0;
}
static void dot_ao_dm(double *vm, double *ao, double *dm,
int nao, int nocc, int ngrids, int bgrids,
uint8_t *non0table, int *shls_slice, int *ao_loc)
{
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
int8_t empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const double D1 = 1;
double beta = 0;
if (has0) {
int box_id, blen, i, j;
size_t b0;
for (box_id = 0; box_id < nbox; box_id++) {
if (!empty[box_id]) {
b0 = box_id * BOXSIZE;
blen = MIN(nao-b0, BOXSIZE);
dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
&D1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
&beta, vm, &ngrids);
beta = 1.0;
}
}
if (beta == 0) { // all empty
for (i = 0; i < nocc; i++) {
for (j = 0; j < bgrids; j++) {
vm[i*ngrids+j] = 0;
}
}
}
} else {
dgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
&D1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
}
}
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc] */
void VXCdot_ao_dm(double *vm, double *ao, double *dm,
int nao, int nocc, int ngrids, int nbas,
uint8_t *non0table, int *shls_slice, int *ao_loc)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel
{
int ip, ib;
#pragma omp for nowait schedule(static)
for (ib = 0; ib < nblk; ib++) {
ip = ib * BLKSIZE;
dot_ao_dm(vm+ip, ao+ip, dm,
nao, nocc, ngrids, MIN(ngrids-ip, BLKSIZE),
non0table+ib*nbas, shls_slice, ao_loc);
}
}
}
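/* Reference (unblocked, serial) semantics of VXCdot_ao_dm, a naive sketch
 * for clarity only, using the flat layouts ao[i*ngrids+g], dm[i*nocc+k],
 * vm[k*ngrids+g]:
 *
 *     for (k = 0; k < nocc; k++)
 *         for (g = 0; g < ngrids; g++) {
 *             double s = 0;
 *             for (i = 0; i < nao; i++)
 *                 s += ao[i*ngrids+g] * dm[i*nocc+k];
 *             vm[k*ngrids+g] = s;
 *         }
 */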
/* vv[n,m] = ao1[n,ngrids] * ao2[m,ngrids] */
static void dot_ao_ao(double *vv, double *ao1, double *ao2,
int nao, int ngrids, int bgrids, int hermi,
uint8_t *non0table, int *shls_slice, int *ao_loc)
{
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
int8_t empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const double D1 = 1;
if (has0) {
int ib, jb, leni, lenj;
int j1 = nbox;
size_t b0i, b0j;
for (ib = 0; ib < nbox; ib++) {
if (!empty[ib]) {
b0i = ib * BOXSIZE;
leni = MIN(nao-b0i, BOXSIZE);
if (hermi) {
j1 = ib + 1;
}
for (jb = 0; jb < j1; jb++) {
if (!empty[jb]) {
b0j = jb * BOXSIZE;
lenj = MIN(nao-b0j, BOXSIZE);
dgemm_(&TRANS_T, &TRANS_N, &lenj, &leni, &bgrids, &D1,
ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids,
&D1, vv+b0i*nao+b0j, &nao);
} }
} }
} else {
dgemm_(&TRANS_T, &TRANS_N, &nao, &nao, &bgrids,
&D1, ao2, &ngrids, ao1, &ngrids, &D1, vv, &nao);
}
}
/* vv[nao,nao]: vv[i,j] = sum_g ao1[i,g] * ao2[j,g] */
void VXCdot_ao_ao(double *vv, double *ao1, double *ao2,
int nao, int ngrids, int nbas, int hermi,
uint8_t *non0table, int *shls_slice, int *ao_loc)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
size_t Nao = nao;
NPdset0(vv, Nao * Nao);
#pragma omp parallel
{
int ip, ib;
double *v_priv = calloc(Nao*Nao+2, sizeof(double));
#pragma omp for nowait schedule(static)
for (ib = 0; ib < nblk; ib++) {
ip = ib * BLKSIZE;
dot_ao_ao(v_priv, ao1+ip, ao2+ip,
nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
non0table+ib*nbas, shls_slice, ao_loc);
}
#pragma omp critical
{
for (ip = 0; ip < Nao*Nao; ip++) {
vv[ip] += v_priv[ip];
}
}
free(v_priv);
}
if (hermi != 0) {
NPdsymm_triu(nao, vv, hermi);
}
}
// einsum 'nip,np->ip': aow[i,p] = sum_n ao[n,i,p] * wv[n,p]
void VXC_dscale_ao(double *aow, double *ao, double *wv,
int comp, int nao, int ngrids)
{
#pragma omp parallel
{
size_t Ngrids = ngrids;
size_t ao_size = nao * Ngrids;
int i, j, ic;
double *pao = ao;
#pragma omp for schedule(static)
for (i = 0; i < nao; i++) {
pao = ao + i * Ngrids;
for (j = 0; j < Ngrids; j++) {
aow[i*Ngrids+j] = pao[j] * wv[j];
}
for (ic = 1; ic < comp; ic++) {
for (j = 0; j < Ngrids; j++) {
aow[i*Ngrids+j] += pao[ic*ao_size+j] * wv[ic*Ngrids+j];
} }
}
}
}
// einsum 'ip,ip->p': rho[p] = sum_i bra[i,p] * ket[i,p]
void VXC_dcontract_rho(double *rho, double *bra, double *ket,
int nao, int ngrids)
{
#pragma omp parallel
{
size_t Ngrids = ngrids;
int nthread = omp_get_num_threads();
int blksize = MAX((Ngrids+nthread-1) / nthread, 1);
int ib, b0, b1, i, j;
#pragma omp for
for (ib = 0; ib < nthread; ib++) {
b0 = ib * blksize;
b1 = MIN(b0 + blksize, ngrids);
for (j = b0; j < b1; j++) {
rho[j] = bra[j] * ket[j];
}
for (i = 1; i < nao; i++) {
for (j = b0; j < b1; j++) {
rho[j] += bra[i*Ngrids+j] * ket[i*Ngrids+j];
} }
}
}
}
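/* VV10 nonlocal-correlation sums, as implemented below: with
 * R2 = |r_i - r_j|^2, g = R2*W0[i] + K[i], gp = R2*W0p[j] + Kp[j],
 * gt = g + gp and T = RpW[j]/(g*gp*gt), each grid point i accumulates
 *     Fvec[i] = -1.5 * sum_j T
 *     Uvec[i] =        sum_j T * (1/g + 1/gt)
 *     Wvec[i] =        sum_j T * (1/g + 1/gt) * R2
 */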
void VXC_vv10nlc(double *Fvec, double *Uvec, double *Wvec,
double *vvcoords, double *coords,
double *W0p, double *W0, double *K, double *Kp, double *RpW,
int vvngrids, int ngrids)
{
#pragma omp parallel
{
double DX, DY, DZ, R2;
double gp, g, gt, T, F, U, W;
int i, j;
#pragma omp for schedule(static)
for (i = 0; i < ngrids; i++) {
F = 0;
U = 0;
W = 0;
for (j = 0; j < vvngrids; j++) {
DX = vvcoords[j*3+0] - coords[i*3+0];
DY = vvcoords[j*3+1] - coords[i*3+1];
DZ = vvcoords[j*3+2] - coords[i*3+2];
R2 = DX*DX + DY*DY + DZ*DZ;
gp = R2*W0p[j] + Kp[j];
g = R2*W0[i] + K[i];
gt = g + gp;
T = RpW[j] / (g*gp*gt);
F += T;
T *= 1./g + 1./gt;
U += T;
W += T * R2;
}
Fvec[i] = F * -1.5;
Uvec[i] = U;
Wvec[i] = W;
}
}
}
|
wand-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% W W AAA N N DDDD %
% W W A A NN N D D %
% W W W AAAAA N N N D D %
% WW WW A A N NN D D %
% W W A A N N DDDD %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickWand Wand View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/wand.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define WandViewId "WandView"
/*
Typedef declarations.
*/
struct _WandView
{
size_t
id;
char
name[MaxTextExtent],
*description;
RectangleInfo
extent;
MagickWand
*wand;
Image
*image;
CacheView
*view;
PixelWand
***pixel_wands;
ExceptionInfo
*exception;
MagickBooleanType
debug;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneWandView() makes a copy of the specified wand view.
%
% The format of the CloneWandView method is:
%
% WandView *CloneWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
WandView
*clone_view;
register ssize_t
i;
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
if (wand_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (WandView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
wand_view->name);
(void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
clone_view->id=AcquireWandId();
(void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
WandViewId,(double) clone_view->id);
clone_view->description=ConstantString(wand_view->description);
clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
wand_view->exception);
clone_view->wand=wand_view->wand;
clone_view->view=CloneCacheView(wand_view->view);
clone_view->extent=wand_view->extent;
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,wand_view->exception);
/* ResetMagickMemory() above left pixel_wands NULL; allocate it before cloning */
clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory((size_t)
GetMagickResourceLimit(ThreadResource),sizeof(*clone_view->pixel_wands));
if (clone_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
wand_view->name);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
wand_view->pixel_wands[i],wand_view->extent.width);
clone_view->debug=wand_view->debug;
if (clone_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
clone_view->signature=WandSignature;
return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyWandView() deallocates memory associated with a wand view.
%
% The format of the DestroyWandView method is:
%
% WandView *DestroyWandView(WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
const size_t number_wands)
{
register ssize_t
i;
assert(pixel_wands != (PixelWand ***) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_wands[i] != (PixelWand **) NULL)
pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
return(pixel_wands);
}
WandExport WandView *DestroyWandView(WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
wand_view->extent.width);
wand_view->image=DestroyImage(wand_view->image);
wand_view->view=DestroyCacheView(wand_view->view);
wand_view->exception=DestroyExceptionInfo(wand_view->exception);
wand_view->signature=(~WandSignature);
RelinquishWandId(wand_view->id);
wand_view=(WandView *) RelinquishMagickMemory(wand_view);
return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferWandViewIterator() iterates over three wand views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination wand view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
% const WandView *duplex,WandView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferWandViewIterator method is:
%
% MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
% WandView *duplex,WandView *destination,
% DuplexTransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o duplex: the duplex wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (DuplexTransferWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*restrict duplex_pixels,
*restrict pixels;
register ssize_t
x;
register Quantum
*restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) duplex->extent.width; x++)
{
PixelSetQuantumPixel(duplex->image,duplex_pixels,
duplex->pixel_wands[id][x]);
duplex_pixels+=GetPixelChannels(duplex->image);
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelSetQuantumPixel(destination->image,destination_pixels,
destination->pixel_wands[id][x]);
destination_pixels+=GetPixelChannels(destination->image);
}
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
destination_pixels);
destination_pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a wand view.
%
% The format of the GetWandViewException method is:
%
% char *GetWandViewException(const WandView *wand_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o wand_view: the pixel wand_view.
%
% o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
ExceptionType *severity)
{
char
*description;
assert(wand_view != (const WandView *) NULL);
assert(wand_view->signature == WandSignature);
if (wand_view->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
assert(severity != (ExceptionType *) NULL);
*severity=wand_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
wand_view->name);
*description='\0';
if (wand_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
wand_view->exception->severity,wand_view->exception->reason),
MaxTextExtent);
if (wand_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
wand_view->exception->severity,wand_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewExtent() returns the wand view extent.
%
% The format of the GetWandViewExtent method is:
%
% RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
return(wand_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewIterator() iterates over the wand view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const WandView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetWandViewIterator method is:
%
% MagickBooleanType GetWandViewIterator(WandView *source,
% GetWandViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
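%  A minimal example callback, illustrative only (the name MyGet is
%  hypothetical):
%
%    MagickBooleanType MyGet(const WandView *source,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      PixelWand **pixels = GetWandViewPixels(source);
%      /* inspect pixels[0 .. extent.width-1] of scanline y here */
%      return(MagickTrue);
%    }
%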
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
GetWandViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (get == (GetWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_GetWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewPixels() returns the wand view pixel_wands.
%
% The format of the GetWandViewPixels method is:
%
% PixelWand *GetWandViewPixels(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
const int
id = GetOpenMPThreadId();
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
return(wand_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t W a n d V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetWandViewWand() returns the magick wand associated with the wand view.
%
% The format of the GetWandViewWand method is:
%
% MagickWand *GetWandViewWand(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
return(wand_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsWandView() returns MagickTrue if the parameter is verified as a wand
% view object.
%
% The format of the IsWandView method is:
%
% MagickBooleanType IsWandView(const WandView *wand_view)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
size_t
length;
if (wand_view == (const WandView *) NULL)
return(MagickFalse);
if (wand_view->signature != WandSignature)
return(MagickFalse);
length=strlen(WandViewId);
if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandView() returns a wand view required for all other methods in the
% Wand View API.
%
% The format of the NewWandView method is:
%
% WandView *NewWandView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
PixelWand
***pixel_wands;
register ssize_t
i;
size_t
number_threads;
number_threads=GetOpenMPMaximumThreads();
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands));
}
return(pixel_wands);
}
WandExport WandView *NewWandView(MagickWand *wand)
{
ExceptionInfo
*exception;
WandView
*wand_view;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
if (wand_view == (WandView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
wand_view->id=AcquireWandId();
(void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
WandViewId,(double) wand_view->id);
wand_view->description=ConstantString("WandView");
wand_view->wand=wand;
exception=AcquireExceptionInfo();
wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
wand_view->extent.width=wand->images->columns;
wand_view->extent.height=wand->images->rows;
wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
wand_view->exception=exception;
if (wand_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
wand_view->debug=IsEventLogging();
wand_view->signature=WandSignature;
return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w W a n d V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewWandViewExtent() returns a wand view required for all other methods
% in the Wand View API.
%
% The format of the NewWandViewExtent method is:
%
% WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,width,height: These values define the perimeter of an extent of
% the pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
const ssize_t y,const size_t width,const size_t height)
{
ExceptionInfo
*exception;
WandView
*wand_view;
assert(wand != (MagickWand *) NULL);
assert(wand->signature == WandSignature);
wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
if (wand_view == (WandView *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
(void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
wand_view->id=AcquireWandId();
(void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
WandViewId,(double) wand_view->id);
wand_view->description=ConstantString("WandView");
exception=AcquireExceptionInfo();
wand_view->wand=wand; /* assign before the cache view dereferences it */
wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
wand_view->extent.width=width;
wand_view->extent.height=height;
wand_view->extent.x=x;
wand_view->extent.y=y;
wand_view->exception=exception;
wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
if (wand_view->pixel_wands == (PixelWand ***) NULL)
ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
wand_view->debug=IsEventLogging();
wand_view->signature=WandSignature;
return(wand_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewDescription() associates a description with a wand view.
%
% The format of the SetWandViewDescription method is:
%
% void SetWandViewDescription(WandView *wand_view,const char *description)
%
% A description of each parameter follows:
%
% o wand_view: the wand view.
%
% o description: the wand view description.
%
*/
WandExport void SetWandViewDescription(WandView *wand_view,
const char *description)
{
assert(wand_view != (WandView *) NULL);
assert(wand_view->signature == WandSignature);
wand_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetWandViewIterator() iterates over the wand view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetWandViewIterator method is:
%
% MagickBooleanType SetWandViewIterator(WandView *destination,
% SetWandViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the wand view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
SetWandViewMethod set,void *context)
{
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (WandView *) NULL);
assert(destination->signature == WandSignature);
if (set == (SetWandViewMethod) NULL)
return(MagickFalse);
destination_image=destination->wand->images;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=destination->extent.height-destination->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,destination->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
pixels);
pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_SetWandViewIterator)
#endif
proceed=SetImageProgress(destination_image,destination->description,
progress++,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferWandViewIterator() iterates over two wand views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination wand view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const WandView *source,
% WandView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferWandViewIterator method is:
%
% MagickBooleanType TransferWandViewIterator(WandView *source,
% WandView *destination,TransferWandViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o destination: the destination wand view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
WandExport MagickBooleanType TransferWandViewIterator(WandView *source,
WandView *destination,TransferWandViewMethod transfer,void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (transfer == (TransferWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
destination_image=destination->wand->images;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const Quantum
*restrict pixels;
register ssize_t
x;
register Quantum
*restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelSetQuantumPixel(destination->image,destination_pixels,
destination->pixel_wands[id][x]);
destination_pixels+=GetPixelChannels(destination->image);
}
if (transfer(source,destination,y,id,context) == MagickFalse)
status=MagickFalse;
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
for (x=0; x < (ssize_t) destination->extent.width; x++)
{
PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
destination_pixels);
destination_pixels+=GetPixelChannels(destination->image);
}
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_TransferWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e W a n d V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateWandViewIterator() iterates over the wand view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateWandViewIterator method is:
%
% MagickBooleanType UpdateWandViewIterator(WandView *source,
% UpdateWandViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source wand view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
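%  A minimal example callback that halves the red channel of each scanline,
%  illustrative only (the name MyUpdate is hypothetical):
%
%    MagickBooleanType MyUpdate(WandView *source,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      register ssize_t x;
%      PixelWand **pixels = GetWandViewPixels(source);
%      for (x=0; x < (ssize_t) GetWandViewExtent(source).width; x++)
%        PixelSetRed(pixels[x],0.5*PixelGetRed(pixels[x]));
%      return(MagickTrue);
%    }
%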
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
UpdateWandViewMethod update,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (WandView *) NULL);
assert(source->signature == WandSignature);
if (update == (UpdateWandViewMethod) NULL)
return(MagickFalse);
source_image=source->wand->images;
status=SetImageStorageClass(source_image,DirectClass,source->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register ssize_t
x;
register Quantum
*restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
pixels+=GetPixelChannels(source->image);
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
for (x=0; x < (ssize_t) source->extent.width; x++)
{
PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
pixels+=GetPixelChannels(source->image);
}
sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
net_sha1_fmt_plug.c | /* Cracker for "Keyed SHA1" network authentication hashes.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Added linkage to dynamic (type dynamic_40) for any salt 230 bytes or less,
* by Jim Fougeron. Any salts > 239 bytes will still be handled by this full
* format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
* of salt. I think we might be able to get 239 bytes (due to a few issues).
* 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
* within dynamic.
*/
#if AC_BUILT
#include "autoconfig.h"
#endif
#ifndef DYNAMIC_DISABLED
#if FMT_EXTERNS_H
extern struct fmt_main fmt_netsha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netsha1);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include "formats.h"
#include "dynamic.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "net-sha1"
#define FORMAT_NAME "\"Keyed SHA1\" BFD"
#define FORMAT_TAG "$netsha1$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 20 // get this right ;)
#define BINARY_SIZE 20
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT_LEN 1024
static struct fmt_tests tests[] = {
/* Real hashes from Cisco routers ;) */
{"$netsha1$20440a340000000100000000000f4240000f424000000000051c010000000001$709d3307304d790f58bf0a3cefd783b438408996", "password12345"},
{"$netsha1$20440a340000000100000000000f4240000f424000000000051c010000000002$94bce4d9084199508669b39f044064082a093de3", "password12345"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void get_ptr();
static void init(struct fmt_main *self);
static void done(void);
#define MAGIC 0xfe5aa5ef
static struct custom_salt {
ARCH_WORD_32 magic;
int length;
unsigned char salt[MAX_SALT_LEN]; // fixed size, but should be OK
} *cur_salt;
static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
static struct fmt_main *pDynamicFmt, *pNetSha1_Dyna;
/* this function converts a 'native' net-sha1 signature string into a $dynamic_40$ syntax string */
static char *Convert(char *Buf, char *ciphertext)
{
char *cp, *cp2;
if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
return ciphertext;
cp = strchr(&ciphertext[2], '$');
if (!cp)
return "*";
cp2 = strchr(&cp[1], '$');
if (!cp2)
return "*";
snprintf(Buf, sizeof(Conv_Buf), "$dynamic_40$%s$HEX%*.*s", &cp2[1], (int)(cp2-cp), (int)(cp2-cp), cp);
return Buf;
}
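/* For example, a native ciphertext
 *   $netsha1$<salt_hex>$<sha1_hex>
 * is rewritten as
 *   $dynamic_40$<sha1_hex>$HEX$<salt_hex>
 * (the '$' kept at the start of 'cp' supplies the separator after "HEX") */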
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q = NULL;
int len;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
q = strrchr(ciphertext, '$');
if (!q)
return 0;
q = q + 1;
if ((q - p - 1) > MAX_SALT_LEN * 2)
return 0;
len = strspn(q, HEXCHARS_lc);
if (len != BINARY_SIZE * 2 || len != strlen(q)) {
get_ptr();
return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
}
if (strspn(p, HEXCHARS_lc) != q - p - 1)
return 0;
return 1;
}
static void *get_salt(char *ciphertext)
{
static char *pBuf=NULL;
struct custom_salt *cs;
char *orig_ct = ciphertext;
int i, len;
if (!pBuf) pBuf = (char *)mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
cs = (struct custom_salt*) pBuf;
memset(cs, 0, sizeof(*cs));
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
len = (strrchr(ciphertext, '$') - ciphertext) / 2;
for (i = 0; i < len; i++)
cs->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
if (len < 230) {
// return our memset buffer (putting the dyna salt pointer into it).
// This keeps the 'pre-cleaned salt() warning from hitting this format)
//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
memcpy((char*)cs, pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
dyna_salt_seen=1;
return cs;
}
cs->magic = MAGIC;
cs->length = len;
return cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (text_in_dynamic_format_already(pDynamicFmt, ciphertext)) {
unsigned char *cp = pDynamicFmt->methods.binary(ciphertext);
memset(out, 0, sizeof(buf.c));
memcpy(out, cp, pDynamicFmt->params.binary_size); // binary size is 16
return out;
}
p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[0](index); return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[1](index); return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[2](index); return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[3](index); return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[4](index); return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[5](index); return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[6](index); return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
get_ptr();
if (cur_salt->magic != MAGIC) {
pDynamicFmt->methods.set_salt(salt);
}
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.crypt_all(pcount, salt);
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->salt, cur_salt->length);
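/* the scheme hashes a fixed 20-byte key field: netsha1_set_key() zero-pads
shorter keys, so hashing PLAINTEXT_LENGTH bytes here is intentional */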
SHA1_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.cmp_all(binary, count);
}
for (; index < count; index++)
if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.cmp_one(binary, index);
}
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void netsha1_set_key(char *key, int index)
{
if (dyna_salt_seen)
pDynamicFmt->methods.set_key(key, index);
/* strncpy will pad with zeros, which is needed */
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
static char *get_key(int index)
{
return saved_key[index];
}
static char *prepare(char *fields[10], struct fmt_main *self) {
static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
char *hash = fields[1];
if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
get_ptr();
if (text_in_dynamic_format_already(pDynamicFmt, hash))
return hash;
sprintf(buf, "%s%s", FORMAT_TAG, hash);
return buf;
}
return hash;
}
struct fmt_main fmt_netsha1 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
netsha1_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
static void get_ptr() {
if (!pDynamicFmt) {
char *Buf;
pNetSha1_Dyna = mem_alloc_tiny(sizeof(fmt_netsha1), 16);
memcpy(pNetSha1_Dyna, &fmt_netsha1, sizeof(fmt_netsha1));
pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetSha1_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-sha1", 0);
fmt_netsha1.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
fmt_netsha1.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
Buf = mem_alloc_tiny(strlen(fmt_netsha1.params.algorithm_name) + 4 + strlen("dynamic_40") + 1, 1);
sprintf(Buf, "%s or %s", fmt_netsha1.params.algorithm_name, "dynamic_40");
fmt_netsha1.params.algorithm_name = Buf;
//pDynamicFmt->methods.init(pDynamicFmt);
}
}
static void init(struct fmt_main *self)
{
// We have to allocate our dyna_40 object first, because we get 'modified' min/max counts from there.
get_ptr();
if (self->private.initialized == 0) {
pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetSha1_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-sha1", 1);
self->private.initialized = 1;
}
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
pDynamicFmt->methods.done();
}
#endif /* plugin stanza */
#endif /* DYNAMIC_DISABLED */
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cached entry if there is one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
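// Example (a sketch of the cache behavior only; the code that actually
// records nullability lives elsewhere in Sema, and FID is a hypothetical
// valid FileID):
//
//   FileNullabilityMap NullMap;
//   FileNullability &FN = NullMap[FID]; // miss: entry pulled from the map
//   FN.SawTypeNullability = true;       // mutates the cached entry in place
//   (void)NullMap[FID];                 // hit: served by the 1-element cache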
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token: all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: because it stores
/// a function_ref, they must make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
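// Example (hypothetical parser snippet): the recorded type is only returned
// for the exact token location it was attached to:
//
//   PreferredTypeBuilder PT;
//   PT.enterTypeCast(Tok.getLocation(), CastType);
//   QualType T = PT.get(Tok.getLocation()); // yields CastType
//   QualType U = PT.get(SomeOtherLoc);      // yields a null QualType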
/// Sema - This implements semantic analysis and AST building for C, C++,
/// and Objective-C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate it here
/// because we cannot use the llvm constants directly, and defining it once
/// here avoids duplicating the value throughout clang code.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
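// For example, "#pragma pack(push, 8)" corresponds to PSK_Push_Set with a
// value of 8, "#pragma pack(pop)" to PSK_Pop, and "#pragma pack()" to
// PSK_Reset.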
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
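// Example (a sketch; the exact semantics of Act are implemented in Sema.cpp,
// and PackStack below is the real instance used for #pragma pack):
//
//   PragmaStack<unsigned> PS(/*Default=*/0);
//   PS.Act(Loc, PSK_Push_Set, /*StackSlotLabel=*/"", 8); // pack(push, 8)
//   // PS.CurrentValue == 8, PS.hasValue() == true
//   PS.Act(Loc, PSK_Pop, /*StackSlotLabel=*/"", 0);      // pack(pop)
//   // back to the default; PS.hasValue() == false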
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
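// Example (hypothetical): the parser constructs one of these around a C++
// method body so that any #pragma changes made inside the body are popped
// back to the sentinel on exit:
//
//   {
//     PragmaStackSentinelRAII Sentinel(S, "InternalPragmaState",
//                                      /*ShouldAct=*/IsCXXMethod);
//     // ... parse the method body ...
//   } // sentinel slots popped here if ShouldAct was true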
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursion.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when, in C++2a, an 'auto' is
/// encountered in a function declaration parameter type specifier in order to
/// invent a corresponding template parameter in the enclosing abbreviated
/// function template. This information is also present in LambdaScopeInfo,
/// stored in the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class members and locations of delete-expressions
/// for which we could not prove whether they mismatch with the
/// new-expression used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file-scoped decls seen so far that have not been used and
/// about which we must warn if they remain unused. Only contains the first
/// declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
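// Example (a sketch of the push/pop protocol used while parsing a
// declaration; the pool name here is illustrative):
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   ParsingDeclState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse; access/deprecation diagnostics collect in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
//   // ... then emit or drop the pooled diagnostics as appropriate ...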
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
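// Example (hypothetical): temporarily make a record the current declaration
// context while building one of its members:
//
//   {
//     Sema::ContextRAII SavedContext(S, RD); // RD: some DeclContext
//     // ... S.CurContext is RD here ...
//   } // previous context, 'this' type override, and scopes restored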
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
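// Example (a sketch matching how implicit special members are defined in
// SemaDeclCXX.cpp):
//
//   SynthesizedFunctionScope Scope(*this, Constructor);
//   Scope.addContextNote(CurrentLocation);
//   // ... synthesize and attach the body; diagnostics emitted while doing
//   // so carry a context note pointing at CurrentLocation ...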
/// WeakUndeclaredIdentifiers - Identifiers contained in \#pragma weak before
/// being declared. This is rare; such an identifier may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// headers to define functions that occur in multiple standards to call the
/// version in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
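// For example: the operand of sizeof is Unevaluated, a case label's value is
// ConstantEvaluated, an ordinary statement's expression is
// PotentiallyEvaluated, and a default argument expression is
// PotentiallyEvaluatedIfUsed.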
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves the list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPFeaturesStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
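// Example (hypothetical): any floating-point state changes (e.g. from
// '#pragma clang fp contract') made while the RAII object is live are undone
// when it goes out of scope:
//
//   {
//     FPFeaturesStateRAII SaveFP(S);
//     // ... act on a compound statement that may contain FP pragmas ...
//   } // S.FPFeatures restored here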
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, making the user-defined ~SemaDiagnosticBuilder a safe
// no-op in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
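// Example (illustrative diagnostic ID and argument; operator<< comes from
// SemaDiagnosticBuilder above):
//
//   Diag(Loc, diag::err_expected) << tok::semi;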
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
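// Example (illustrative; DiagID and Name stand in for a diagnostic whose
// text takes the name as %0 and the type as %1): bind extra arguments so
// that diagnose() streams them, in order, before the type itself:
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(DiagID, Name);
//   if (RequireCompleteType(Loc, T, Diagnoser))
//     return true;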
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
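///
/// For example (illustrative):
/// \code
///   void *ControllerForName(const char *name)
///     __attribute__((swift_name("controllerForName(_:)")));
/// \endcode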
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, so when a noderef pointer is dereferenced we do not yet
/// know whether the access is legitimate. For example, in `&*p` where `p`
/// is a noderef pointer, we will first parse the `*p` and only later see
/// that its address is taken. This requires keeping a container of all
/// pending expressions and checking whether their addresses are eventually
/// taken.
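/// For example (illustrative):
/// \code
///   int __attribute__((noderef)) *p;
///   int x = *p;   // warned: the noderef pointer is dereferenced
///   int *q = &*p; // not warned: the dereference is immediately
///                 // re-addressed
/// \endcode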
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
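// Example (illustrative): the variadic overload above builds the
// BoundTypeDiagnoser internally, so these two calls are equivalent:
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(DiagID, Name);
//   RequireCompleteType(Loc, T, Diagnoser);
//   RequireCompleteType(Loc, T, DiagID, Name);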
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as a non-type, and an expression representing
/// that name has been formed.
NC_ContextIndependentExpr,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification ContextIndependentExpr(ExprResult E) {
NameClassification Result(NC_ContextIndependentExpr);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_ContextIndependentExpr);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
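// Example (illustrative, simplified parser-side use):
//   Sema::NameClassification NC =
//       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next, &CCC);
//   switch (NC.getKind()) {
//   case Sema::NC_Type:    /* form a type from NC.getType() */ break;
//   case Sema::NC_NonType: /* call ActOnNameClassifiedAsNonType */ break;
//   default: break;
//   }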
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
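// Example (illustrative): in 'p->f<int>()' where 'f' is a member of a
// dependent type, 'f' is formed as a CXXDependentScopeMemberExpr without
// explicit template arguments; these hooks let the parser recognize that
// 'f' was probably intended as a template-name and point the diagnostic at
// the '<' and '>' locations.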
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
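// Example (illustrative): under ObjC ARC, an ownership-qualified member
// makes a C union non-trivial to initialize, copy, and destruct:
//   union U { id Obj; int I; };
//   void f(union U u);  // checked with NTCUC_FunctionParam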
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics. Exactly one
/// of the two fields is meaningful at a time (the other holds its
/// zero/invalid value), so special members map to indices below CXXInvalid
/// and defaulted comparisons continue from CXXInvalid upward.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, the body is actually parsed, and a structural mismatch is
/// rejected with an error.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
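// Example (illustrative): an attribute written directly on the declaration
// (AP_Explicit) takes precedence over the same attribute injected by
// '#pragma clang attribute' (AP_PragmaClangAttribute), which in turn takes
// precedence over one inferred from another platform
// (AP_InferredFromOtherPlatform); lower summed priorities win when merging.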
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
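// Example (illustrative):
//   void f(int);
//   void f(double); // Ovl_Overload: a distinct signature
//   void f(int);    // Ovl_Match: redeclares the first 'f'
// Had lookup found a non-function 'f' (e.g. a variable), the result would
// be Ovl_NonFunction.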
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
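// Example (illustrative): copy-initialization ('S s = x;') considers no
// explicit functions (None); a contextual conversion to bool additionally
// considers explicit conversion functions (Conversions);
// direct-initialization ('S s(x);') may consider both (All).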
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
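// Example (illustrative):
//   switch (N) { case Val: break; } // Val: CCEK_CaseValue
//   template <int I> struct A {};
//   A<Val> a;                       // Val: CCEK_TemplateArg
//   if constexpr (Cond) {}          // Cond: CCEK_ConstexprIf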
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
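// Example (a hedged sketch of typical usage; assumes the lookup results are
// plain functions): candidates are accumulated into an OverloadCandidateSet
// and then resolved with its BestViableFunction member:
//
//   OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
//   for (NamedDecl *D : Lookup)
//     if (auto *FD = dyn_cast<FunctionDecl>(D))
//       AddOverloadCandidate(FD, DeclAccessPair::make(FD, FD->getAccess()),
//                            Args, CandidateSet);
//   OverloadCandidateSet::iterator Best;
//   if (CandidateSet.BestViableFunction(*this, Loc, Best) == OR_Success)
//     FunctionDecl *Chosen = Best->Function;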
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-template overload
// candidates identified by the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
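// Example (illustrative sketch): unqualified ordinary lookup from inside
// Sema usually goes through a LookupResult:
//
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult())
//     NamedDecl *Found = R.getFoundDecl();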
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
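// Example (illustrative): for a user-defined literal such as 1_km, the
// lookup results above correspond to these operator forms:
//
//   LOLR_Cooked:   operator""_km(unsigned long long)
//   LOLR_Raw:      operator""_km(const char *)
//   LOLR_Template: template <char...> auto operator""_km()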
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of function emission, based on the CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
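// Example (illustrative sketch): a caller that only accepts integer-typed
// corrections can pass a filter; other correction combinations are retried
// until one passes or all are exhausted:
//
//   ExprResult Fixed = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
//         return Rebuilt->getType()->isIntegerType() ? ExprResult(Rebuilt)
//                                                    : ExprError();
//       });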
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs);
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// null. On success it also returns the ivar's property in \p PDecl.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must not have a user-declared setter
/// without a user-declared getter, or vice versa.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match,
/// returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter CheckTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
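// Example (illustrative sketch): the parser wraps the increment of a for
// statement as a discarded-value full expression before handing it to
// ActOnForStmt below:
//
//   FullExprArg Third = Actions.MakeFullDiscardedValueExpr(IncExpr);
//   StmtResult For = Actions.ActOnForStmt(ForLoc, LParenLoc, InitStmt,
//                                         Cond, Third, RParenLoc, Body);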
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
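// Example (illustrative): the RAII object pairs the
// ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt calls automatically:
//
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef);
//     // ...act on the statements of the compound body...
//   } // ActOnFinishOfCompoundStmt runs here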
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
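// Example (illustrative; BuildBody is a hypothetical helper): this is handy
// on error paths, where the scope is popped automatically unless the success
// path disables it first:
//
//   Sema::FunctionScopeRAII PopOnError(SemaRef);
//   if (BuildBody())
//     PopOnError.disable(); // success: the normal code path pops the scope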
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
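// Example (illustrative): the enumerators are bit flags, so a policy is
// tested by masking:
//
//   CopyElisionSemanticsKind CESK = CES_Default;
//   bool ParamsOk = (CESK & CES_AllowParameters) != 0;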
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and \p Body is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
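// Example (illustrative sketch): probing whether a capture would succeed
// without actually performing it or emitting diagnostics:
//
//   QualType CaptureType, DeclRefType;
//   bool Failed = tryCaptureVariable(Var, Loc, TryCapture_Implicit,
//                                    SourceLocation(),
//                                    /*BuildAndDiagnose=*/false, CaptureType,
//                                    DeclRefType, /*StopAt=*/nullptr);
//   // On success (!Failed), DeclRefType is the type a reference to Var
//   // would have in the current scope.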
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
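// Example (illustrative; the diagnostic ID is hypothetical): a captureless
// lambda converts to the plausibility predicate's function-pointer type:
//
//   ExprResult R = E;
//   tryToRecoverWithCall(R, PDiag(diag::err_some_id /*hypothetical*/),
//                        /*ForceComplain=*/false,
//                        [](QualType T) { return !T->isVoidType(); });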
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
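// For illustration, the C11 generic selection these entry points handle
// (example adapted from the C standard's tgmath-style usage):
//
//   #define cbrt(X) _Generic((X), long double: cbrtl, \
//                                 float: cbrtf,       \
//                                 default: cbrt)(X)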
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
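// For illustration, the OpenMP 5.0 iterator modifier this models:
//
//   #pragma omp task depend(iterator(i = 0:n), in: a[i])
//
// Each 'i = 0:n' definition corresponds to one OMPIteratorData entry
// (the identifier, its range bounds, and the punctuation locations).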
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
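// For illustration: given 'std::unique_ptr<Foo> P; P.bar();', lookup through
// '.' fails, and with these extra arguments the member access can be retried
// as 'P->bar()' to diagnose and recover from the mistaken operator.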
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
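// For illustration: in the GNU extension 'x ?: y' (equivalent to
// 'x ? x : y' with 'x' evaluated only once), LHSExpr is null here.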
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
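// For illustration, a GNU statement expression; its value is that of the
// last expression statement in the braces:
//
//   int x = ({ int t = f(); t * 2; });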
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
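// For illustration: in the example above, 'a', '.b', and '.c' become
// identifier components (isBrackets == false, U.IdentInfo set), while
// '[123]' and '[456]' become bracket components (isBrackets == true,
// U.E set to the index expression).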
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
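// For illustration, the Microsoft extension these checks support:
//
//   __if_exists(SomeType::Member) {
//     // tokens here are parsed only if the symbol exists
//   }
//
// Inside a template, the name may be dependent, yielding IER_Dependent.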
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
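// For illustration, a block literal handled by these callbacks:
//
//   int (^add)(int, int) = ^(int x, int y) { return x + y; };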
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache recording whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then comes throw(collected exceptions).
// Finally, no specification at all, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
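// For illustration, a sketched use of the class above: merge the exception
// specifications of every function a defaulted member would call, then read
// off the combined result. 'CalledMethods' is a placeholder.
//
//   ImplicitExceptionSpecification ExceptSpec(*this);
//   for (const CXXMethodDecl *M : CalledMethods)
//     ExceptSpec.CalledDecl(Loc, M);
//   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();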
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
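// For illustration, C++17 fold-expressions handled by these entry points:
//
//   template <typename... Ts> auto sum(Ts... ts) { return (ts + ...); }
//   template <typename... Ts> bool all(Ts... ts) { return (... && ts); }
//
// With an empty pack, '&&' folds to true and '||' to false (see
// BuildEmptyCXXFoldExpr); most other operators are ill-formed when empty.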
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
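// For illustration, a sketched use of CXXThisScopeRAII while processing
// code that mentions 'this' outside a member function body, e.g. an
// exception specification ('SemaRef' and 'Record' are placeholders):
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, Record, Qualifiers());
//     // ... 'this' is usable here, with the given qualifiers ...
//   }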
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
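// For illustration: '::new T' restricts the lookup of 'operator new' to
// AFS_Global, while a plain 'new T' for a class type T uses AFS_Both, so a
// class-scope 'operator new' is preferred over the global one.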
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
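// For illustration, a pseudo-destructor call on a scalar type, which these
// entry points parse and check:
//
//   typedef int Int;
//   void destroy(Int *p) { p->~Int(); }  // has no effect, but must be valid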
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
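// For illustration: while parsing 'A::B::m', the parser forms one
// NestedNameSpecInfo per 'identifier::' pair ('A::', then 'B::'), each
// carrying the identifier, its location, and the location of its '::'.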
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
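// For illustration, C++14 init-captures processed by these entry points
// ('compute', 'obj', and 'touch' are placeholders):
//
//   auto L1 = [x = compute()] { return x; };  // copy-initialized capture
//   auto L2 = [&r = obj] { r.touch(); };      // reference init-capture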
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it sets up
/// what is needed so that IR generation can synthesize the real body of the
/// function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
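///
/// For example (sketch), the requires-clause operand below must be a valid
/// constraint expression, with each atomic constraint a primary expression:
///
/// \code
/// template<typename T>
/// requires std::is_integral_v<T> && (sizeof(T) > 1)
/// void f(T);
/// \endcode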
bool CheckConstraintExpression(Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
/// Check whether the given type-dependent expression will be the name of a
/// function or another callable function-like entity (e.g. a function
/// template or overload set) for any substitution.
bool IsDependentFunctionNameExpr(Expr *E);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
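///
/// Illustrative sketch (C1 and C2 are hypothetical concepts):
///
/// \code
/// template<typename T> requires C1<T> void f(T);          // D1
/// template<typename T> requires C1<T> && C2<T> void f(T); // D2
/// \endcode
///
/// Here D2 is at least as constrained as D1, but not vice versa.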
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2, but
/// would have been if a pair of atomic constraints involved had been
/// declared in a concept rather than repeated in two separate places in
/// code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
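///
/// For example (sketch, with a hypothetical concept C), instantiating
/// \c S<int> below checks C's constraint expression against
/// TemplateArgs = {int}:
///
/// \code
/// template<typename T> concept C = requires(T t) { t + t; };
/// template<C T> struct S { };
/// S<int> s;
/// \endcode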
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful; emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires-clause, if
/// any, is satisfied. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful; emits a diagnostic and returns true
/// if an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
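///
/// For example (sketch), each use of \c g below requires checking its
/// trailing requires-clause:
///
/// \code
/// template<typename T> void g(T) requires (sizeof(T) == 4);
/// \endcode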
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
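///
/// The effect is roughly as if (sketch) such a type had been declared:
///
/// \code
/// class [[gsl::Owner(int)]] MyIntOwner { /* ... */ };
/// \endcode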
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check and build a base specifier; called by
/// ActOnBaseSpecifier once a base specifier has been parsed.
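///
/// For example (sketch), each clause after the colon below is a
/// base specifier:
///
/// \code
/// class Derived : public virtual Base1, private Base2 { };
/// \endcode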
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
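///
/// For example (sketch), the override below has a covariant return type:
///
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; }; // covariant: D* vs. B*
/// \endcode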
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec of the overriding function is a subset of the base function's
/// exception spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
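///
/// For example (sketch):
///
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: D::f overrides 'final' B::f
/// \endcode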
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
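// A typical call site of the variadic overload above passes a diagnostic ID
// followed by its arguments, e.g. (sketch; assumes a suitable diagnostic
// such as diag::err_abstract_type_in_decl):
//
//   RequireNonAbstractType(Loc, T, diag::err_abstract_type_in_decl,
//                          AbstractReturnType);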
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
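///
/// For example (sketch), \c X in the second declaration below names a
/// deduction guide:
///
/// \code
/// template<typename T> struct X { X(T); };
/// X(int) -> X<int>;
/// \endcode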
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc The location of the 'typename' keyword.
/// \param SS The nested-name-specifier following the typename (e.g., 'T::').
/// \param II The identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc The location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc The location of the 'typename' keyword.
/// \param SS The nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc The location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// A block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
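///
/// For example (sketch), the template argument \c Ts... below is a pack
/// expansion:
///
/// \code
/// template<typename ...Ts> struct tuple;
/// template<typename ...Ts> using same = tuple<Ts...>;
/// \endcode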
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
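///
/// Illustrative case: evaluating sizeof...(Ts) below does not require
/// materializing each expanded element:
/// \code
///   template<typename ...Ts> constexpr std::size_t N = sizeof...(Ts);
/// \endcode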
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
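///
/// Illustrative, target-specific example: deduction from '&f' below can
/// succeed once the calling convention is adjusted away:
/// \code
///   void __stdcall f(int);
///   template<typename T> void g(void (*)(T));
///   // g(&f) deduces T = int on targets where __stdcall differs.
/// \endcode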
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
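/// As an illustration (example source, not part of this interface), the
/// commented-out call below would yield TDK_Inconsistent, because T is
/// deduced as both int and double:
/// \code
///   template<typename T> void h(T, T);
///   // h(1, 2.0);
/// \endcode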
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
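/// Illustrative inputs to DeduceAutoType (example source, not part of
/// this interface):
/// \code
///   auto x = 42;  // deduces int; DAR_Succeeded
///   auto *p = &x; // pattern 'auto *' deduced against 'int *'
/// \endcode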
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
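/// Illustrative class template argument deduction performed through the
/// deduction guides declared above:
/// \code
///   std::pair p(1, 2.0); // deduces std::pair<int, double>
/// \endcode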
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
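///
/// A minimal usage sketch (illustrative; 'SemaRef' and 'I' are assumed):
/// \code
///   {
///     ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///     // ... substitute using the I-th element of each pack ...
///   } // the previous index is restored here
/// \endcode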
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
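///
/// Typical usage (a sketch, not a prescription):
/// \code
///   SFINAETrap Trap(SemaRef);
///   // ... perform a substitution or deduction step that may fail ...
///   bool Failed = Trap.hasErrorOccurred(); // errors trapped, not emitted
/// \endcode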
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
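///
/// Usage sketch (illustrative):
/// \code
///   TentativeAnalysisScope Tentative(SemaRef);
///   // ... provisionally analyze a construct; diagnostics and typo
///   // correction in the immediate context are suppressed ...
/// \endcode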
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
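/// RAII scope that, when enabled, saves the sets of pending
/// instantiations and vtable uses so that instantiations triggered
/// inside the scope can be performed eagerly via \c perform().
/// Usage sketch (illustrative):
/// \code
///   GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
///   // ... code that may trigger implicit instantiations ...
///   GlobalInstantiations.perform();
/// \endcode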
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
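/// RAII scope pairing with \c PendingLocalImplicitInstantiations: local
/// instantiations queued inside the scope are performed by \c perform(),
/// and the previous queue is restored on destruction. Usage sketch
/// (illustrative):
/// \code
///   LocalEagerInstantiationScope LocalInstantiations(S);
///   // ... instantiate members of a local class ...
///   LocalInstantiations.perform();
/// \endcode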
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
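///
/// Usage sketch (illustrative; \c EPI and \c NumParams are assumed to be
/// a \c FunctionProtoType::ExtProtoInfo and a parameter count):
/// \code
///   ExtParameterInfoBuilder ParamInfos;
///   ParamInfos.set(0, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
/// \endcode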
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
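///
/// Illustrative Objective-C source for the two outcomes:
/// \code
///   NSArray<NSString *> *names;  // identifiers resolve to type arguments
///   id<NSCopying, NSCoding> obj; // identifiers resolve to protocols
/// \endcode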
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
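///
/// Illustrative message sends for each kind:
/// \code
///   [super init];      // ObjCSuperMessage
///   [myObject copy];   // ObjCInstanceMessage
///   [NSString string]; // ObjCClassMessage
/// \endcode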
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
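///
/// Illustrative forms handled here:
/// \code
///   #pragma pack(8)
///   #pragma pack(push, r1, 4)
///   #pragma pack(pop, r1)
/// \endcode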
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
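// Rough correspondence implied by the enumerator names (illustration only):
//   #pragma data_seg("...")  -> PSK_DataSeg
//   #pragma bss_seg("...")   -> PSK_BSSSeg
//   #pragma const_seg("...") -> PSK_ConstSeg
//   #pragma code_seg("...")  -> PSK_CodeSeg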
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility....
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
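/// Called on a well-formed attribute within a '\#pragma clang attribute
/// push' directive, together with its subject match rules.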
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
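/// Called on well-formed '\#pragma clang attribute push' with no attributes.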
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
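// For example (assumed usage): between '#pragma clang optimize off' and the
// matching '#pragma clang optimize on', this returns the location of the
// 'off' pragma; outside such a region it returns an invalid location.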
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
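// e.g. (assumed illustration):
//   setOpenCLExtensionForType(T, "cl_khr_fp64 cl_khr_fp16");
// records that T may only be used while the corresponding extensions are
// enabled.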
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Return true if we are currently inside an `omp begin/end declare
/// variant` scope.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle an `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle an `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
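// Sketch of the source pattern these hooks bracket (assumed example):
//   #pragma omp begin declare variant match(device={kind(gpu)})
//   int foo(void) { return 1; }  // renamed with the scope's NameSuffix and
//                                // registered as a variant of 'foo'
//   #pragma omp end declare variant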
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the variables captured by a lambda in the enclosing
/// OpenMP region before the lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on a correct id-expression from the '#pragma omp
/// threadprivate' directive.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called at the start of a declare target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the declaration name provided in an OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on a correct id-expression from '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function and the variant function are not
/// compatible with the pragma; otherwise, the pair of the original function
/// and the variant reference expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive
/// is applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
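// Illustrative mapping from C++ syntax to conversion kinds (assumed
// examples, not from this header):
//   int i = 0.5;           // CCK_ImplicitConversion
//   (int)0.5               // CCK_CStyleCast
//   int(0.5)               // CCK_FunctionalCast
//   static_cast<int>(0.5)  // CCK_OtherCast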
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If either operand is not arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
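//
// Example usage (an illustrative sketch; LHS, RHS, and Loc come from the
// caller):
//
//   QualType ResultTy =
//       UsualArithmeticConversions(LHS, RHS, Loc, ACK_Arithmetic);
//   if (LHS.isInvalid() || RHS.isInvalid())
//     return QualType();
//   if (ResultTy.isNull())
//     return InvalidOperands(Loc, LHS, RHS);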
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
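///
/// Example usage (an illustrative sketch; ConvTy comes from a prior
/// CheckAssignmentConstraints call and the operands from the caller):
///
///  if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHS.get(),
///                               AA_Assigning))
///    return QualType();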
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
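///
/// Example usage (an illustrative sketch; LHSType, RHS, and Loc come from
/// the caller):
///
///  AssignConvertType ConvTy =
///      CheckSingleAssignmentConstraints(LHSType, RHS);
///  if (RHS.isInvalid())
///    return ExprError();
///  if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                               RHS.get()->getType(), RHS.get(),
///                               AA_Passing))
///    return ExprError();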
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
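/// Example usage of the query below (an illustrative sketch; T1 is the
/// referenced type and T2 the initializer's type):
///
///  ReferenceConversions Conv;
///  if (CompareReferenceRelationship(Loc, T1, T2, &Conv) == Ref_Compatible &&
///      !(Conv & ReferenceConversions::DerivedToBase)) {
///    // Direct binding with no base-class adjustment is possible.
///  }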
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
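/// Example usage of ActOnCondition (an illustrative sketch; S, Loc, and
/// CondExpr come from the parser):
///
///  ConditionResult Cond =
///      ActOnCondition(S, Loc, CondExpr, ConditionKind::Boolean);
///  if (Cond.isInvalid())
///    return StmtError();
///  if (llvm::Optional<bool> Known = Cond.getKnownValue())
///    ; // The condition folded to *Known at compile time.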
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or ExprError() if there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns ExprError() if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression via \p Result.
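///
/// Example usage (an illustrative sketch; DiagID is whatever diagnostic the
/// caller wants reported on failure):
///
///  llvm::APSInt Value;
///  ExprResult ICE = VerifyIntegerConstantExpression(E, &Value, DiagID);
///  if (ICE.isInvalid())
///    return ExprError();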
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0 via \p ZeroWidth.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
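///
/// Example usage (an illustrative sketch; Loc and Callee come from the call
/// site being checked):
///
///  if (!CheckCUDACall(Loc, Callee))
///    return ExprError();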
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In case of
/// error, emits the appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global variables
/// (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are
/// implicitly static in CUDA). One exception is that CUDA allows constant
/// initializers for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const Expr *CoprocArg, bool WantCDE);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
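///
/// Example usage (an illustrative sketch; the argument kind and magic value
/// are hypothetical):
///
///  RegisterTypeTagForDatatype(&Context.Idents.get("my_type_tag"),
///                             /*MagicValue=*/42, Context.IntTy,
///                             /*LayoutCompatible=*/false,
///                             /*MustBeNull=*/false);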
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
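///
/// Example usage (an illustrative sketch; Proto and Args describe the call
/// being checked):
///
///  if (TooManyArguments(Proto->getNumParams(), Args.size(),
///                       /*PartialOverloading=*/true))
///    ; // Diagnose the excess arguments.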
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view it as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with
/// lower or equal alignment requirements. If so, it removes the expression
/// from the set. This is used when we do not want to diagnose such
/// misaligned access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics, e.g. in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
geometry_para.c | /*
* Copyright 2010 (c) SINTEF ICT, Applied Mathematics.
* Jostein R. Natvig <Jostein.R.Natvig at sintef.no>
*/
#include <omp.h>
#include <math.h>
#include <stdio.h>
#include "geometry.h"
/* ------------------------------------------------------------------ */
static void
cross(const double u[3], const double v[3], double w[3])
/* ------------------------------------------------------------------ */
{
w[0] = u[1]*v[2]-u[2]*v[1];
w[1] = u[2]*v[0]-u[0]*v[2];
w[2] = u[0]*v[1]-u[1]*v[0];
}
/* ------------------------------------------------------------------ */
static double
norm(const double w[3])
/* ------------------------------------------------------------------ */
{
return sqrt(w[0]*w[0] + w[1]*w[1] + w[2]*w[2]);
}
/* ------------------------------------------------------------------ */
void
compute_face_geometry(int ndims, double *coords, int nfaces,
int *nodepos, int *facenodes, double *fnormals,
double *fcentroids, double *fareas)
/* ------------------------------------------------------------------ */
{
/* Assume 3D for now */
int f;
double x[3];
double u[3];
double v[3];
double w[3];
int i,k;
int node;
double cface[3] = {0};
double n[3] = {0};
double twothirds = 0.666666666666666666666666666667;
double a;
int num_face_nodes;
double area;
/*#pragma omp parallel for */
/*#pragma omp parallel for shared(fnormals,fcentroids,fareas)*/
#pragma omp parallel for default(none) \
private(f,x,u,v,w,i,k,node,cface,n,a,num_face_nodes,area) \
shared(fnormals,fcentroids,fareas \
,coords, nfaces, nodepos, facenodes) \
firstprivate(ndims, twothirds)
for (f=0; f<nfaces; ++f)
{
for(i=0; i<ndims; ++i) x[i] = 0.0;
for(i=0; i<ndims; ++i) n[i] = 0.0;
for(i=0; i<ndims; ++i) cface[i] = 0.0;
/* average node */
for(k=nodepos[f]; k<nodepos[f+1]; ++k)
{
node = facenodes[k];
for (i=0; i<ndims; ++i) x[i] += coords[3*node+i];
}
num_face_nodes = nodepos[f+1] - nodepos[f];
for(i=0; i<ndims; ++i) x[i] /= num_face_nodes;
/* compute first vector u (to the last node in the face) */
node = facenodes[nodepos[f+1]-1];
for(i=0; i<ndims; ++i) u[i] = coords[3*node+i] - x[i];
area=0;
/* Compute triangular contrib. to face normal and face centroid*/
for(k=nodepos[f]; k<nodepos[f+1]; ++k)
{
node = facenodes[k];
for (i=0; i<ndims; ++i) v[i] = coords[3*node+i] - x[i];
cross(u,v,w);
a = 0.5*norm(w);
area += a;
/* if(!(a>0))
{
fprintf(stderr, "Internal error in compute_face_geometry.");
}
*/
/* face normal */
for (i=0; i<ndims; ++i) n[i] += w[i];
/* face centroid */
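      /* centroid of the triangle (x, x+u, x+v) is x + (u+v)/3; note that
         twothirds*0.5 == 1/3, and each contribution is weighted by the
         triangle area a */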
for (i=0; i<ndims; ++i)
cface[i] += a*(x[i]+twothirds*0.5*(u[i]+v[i]));
/* Store v in u for next iteration */
for (i=0; i<ndims; ++i) u[i] = v[i];
}
/* Store face normal and face centroid */
for (i=0; i<ndims; ++i)
{
/* normal is scaled with face area */
fnormals [3*f+i] = 0.5*n[i];
fcentroids[3*f+i] = cface[i]/area;
}
fareas[f] = area;
}
}
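/* Usage sketch (illustrative, not part of the original file): for a 3D mesh
 * with nf faces described by (nodepos, facenodes) and node coordinates in
 * 'coords' (3 doubles per node), the caller allocates 3*nf doubles for
 * fnormals and fcentroids and nf doubles for fareas, then calls:
 *
 *   compute_face_geometry(3, coords, nf, nodepos, facenodes,
 *                         fnormals, fcentroids, fareas);
 */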
/* ------------------------------------------------------------------ */
void
compute_cell_geometry(int ndims, double *coords,
int *nodepos, int *facenodes, int *neighbors,
double *fnormals,
double *fcentroids,
int ncells, int *facepos, int *cellfaces,
double *ccentroids, double *cvolumes)
/* ------------------------------------------------------------------ */
{
int i,k, f,c;
int face,node;
double x[3];
double u[3];
double v[3];
double w[3];
double xcell[3];
double ccell[3];
double cface[3] = {0};
int num_faces;
double volume;
double tet_volume, subnormal_sign;
double twothirds = 0.666666666666666666666666666667;
#pragma omp parallel for default(none) \
private(i,k,f,c,face,node,x,u,v,w,xcell \
,ccell ,cface,num_faces,volume, tet_volume, subnormal_sign) \
shared(coords,nodepos,facenodes,neighbors, \
fnormals,fcentroids,facepos,cellfaces,ccentroids,cvolumes) \
firstprivate(ncells,ndims,twothirds)
for (c=0; c<ncells; ++c)
{
for(i=0; i<ndims; ++i) xcell[i] = 0.0;
for(i=0; i<ndims; ++i) ccell[i] = 0.0;
/*
* Approximate cell center as average of face centroids
*/
for(f=facepos[c]; f<facepos[c+1]; ++f)
{
face = cellfaces[f];
for (i=0; i<ndims; ++i) xcell[i] += fcentroids[3*face+i];
}
num_faces = facepos[c+1] - facepos[c];
for(i=0; i<ndims; ++i) xcell[i] /= num_faces;
/*
* For all faces, add tetrahedron's volume and centroid to
* 'cvolume' and 'ccentroid'.
*/
volume=0;
for(f=facepos[c]; f<facepos[c+1]; ++f)
{
int num_face_nodes;
for(i=0; i<ndims; ++i) x[i] = 0.0;
for(i=0; i<ndims; ++i) cface[i] = 0.0;
face = cellfaces[f];
/* average face node x */
for(k=nodepos[face]; k<nodepos[face+1]; ++k)
{
node = facenodes[k];
for (i=0; i<ndims; ++i) x[i] += coords[3*node+i];
}
num_face_nodes = nodepos[face+1] - nodepos[face];
for(i=0; i<ndims; ++i) x[i] /= num_face_nodes;
/* compute first vector u (to the last node in the face) */
node = facenodes[nodepos[face+1]-1];
for(i=0; i<ndims; ++i) u[i] = coords[3*node+i] - x[i];
/* Compute triangular contributions to face normal and face centroid */
for(k=nodepos[face]; k<nodepos[face+1]; ++k)
{
node = facenodes[k];
for (i=0; i<ndims; ++i) v[i] = coords[3*node+i] - x[i];
cross(u,v,w);
tet_volume = 0;
for(i=0; i<ndims; ++i){
tet_volume += 0.5/3 * w[i]*(x[i]-xcell[i]);
}
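            /* 0.5/3 * w.(x - xcell) is one sixth of (u x v).(x - xcell): the
               volume of the tetrahedron with apex xcell over the triangle
               (x, x+u, x+v); its sign is fixed up below using the stored
               face normal and the face's owner cell */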
/*tet_volume = fabs(tet_volume);*/
subnormal_sign=0.0;
for(i=0; i<ndims; ++i){
subnormal_sign += w[i]*fnormals[3*face+i];
}
if(subnormal_sign<0){
tet_volume*=-1.0;
}
if(!(neighbors[2*face+0]==c)){
tet_volume*=-1.0;
}
volume += tet_volume;
/* face centroid of triangle */
for (i=0; i<ndims; ++i) cface[i] = (x[i]+(twothirds)*0.5*(u[i]+v[i]));
/* Cell centroid */
for (i=0; i<ndims; ++i) ccell[i] += tet_volume * 3/4.0*(cface[i] - xcell[i]);
/* Store v in u for next iteration */
for (i=0; i<ndims; ++i) u[i] = v[i];
}
}
for (i=0; i<ndims; ++i) ccentroids[3*c+i] = xcell[i] + ccell[i]/volume;
cvolumes[c] = volume;
}
}
|
GB_transpose_bucket.c | //------------------------------------------------------------------------------
// GB_transpose_bucket: transpose and optionally typecast and/or apply operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// C = A' or op(A'). Optionally typecasts from A->type to the new type ctype,
// and/or optionally applies a unary operator.
// If an operator z=op(x) is provided, the type of z must be the same as the
// type of C. The type of A must be compatible with the type of x (A is
// typecasted into the type of x). These conditions must be checked in the
// caller.
// The input matrix A may have jumbled row indices; this is OK.
// The output matrix C will always have sorted row indices.
// This function is agnostic for the CSR/CSC format of C and A. C_is_csc is
// defined by the caller and assigned to C->is_csc, but otherwise unused.
// A->is_csc is ignored.
// The input can be hypersparse or non-hypersparse. The output C is always
// non-hypersparse, and never shallow.
// If A is m-by-n in CSC format, with e nonzeros, the time and memory taken is
// O(m+n+e) if A is non-hypersparse, or O(m+e) if hypersparse. This is fine if
// most rows and columns of A are non-empty, but can be very costly if A or A'
// is hypersparse. In particular, if A is a non-hypersparse column vector with
// m >> e, the time and memory is O(m), which can be huge. Thus, for
// hypersparse matrices, or for very sparse matrices, the qsort method should
// be used instead (see GB_transpose).
// This method is parallel, but not highly scalable. At most O(e/m) threads
// are used.
#include "GB_transpose.h"
#define GB_FREE_WORK \
{ \
if (Rowcounts != NULL) \
{ \
for (int taskid = 0 ; taskid < naslice ; taskid++) \
{ \
GB_FREE_MEMORY (Rowcounts [taskid], vlen+1, sizeof (int64_t)) ; \
} \
} \
GB_FREE_MEMORY (Rowcounts, naslice, sizeof (int64_t *)) ; \
GB_FREE_MEMORY (A_slice, naslice+1, sizeof (int64_t)) ; \
}
#define GB_FREE_ALL \
{ \
GB_MATRIX_FREE (&C) ; \
GB_FREE_WORK ; \
}
GrB_Info GB_transpose_bucket // bucket transpose; typecast and apply op
(
GrB_Matrix *Chandle, // output matrix (unallocated on input)
const GrB_Type ctype, // type of output matrix C
const bool C_is_csc, // format of output matrix C
const GrB_Matrix A, // input matrix
const GrB_UnaryOp op, // operator to apply, NULL if no operator
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Chandle != NULL) ;
(*Chandle) = NULL ;
ASSERT_TYPE_OK (ctype, "ctype for transpose", GB0) ;
// OK if the matrix A is jumbled; this function is intended to sort it.
ASSERT_MATRIX_OK_OR_JUMBLED (A, "A input for transpose_bucket", GB0) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
if (op != NULL)
{
ASSERT_UNARYOP_OK (op, "op for transpose", GB0) ;
ASSERT (ctype == op->ztype) ;
ASSERT (GB_Type_compatible (A->type, op->xtype)) ;
}
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
int64_t anz = GB_NNZ (A) ;
int64_t vlen = A->vlen ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
// # of threads to use in the O(vlen) loops below
int nthreads = GB_nthreads (vlen, chunk, nthreads_max) ;
// A is sliced into naslice parts, so that each part has at least vlen
// entries. The workspace required is naslice*vlen, so this ensures
// the workspace is no more than the size of A.
// naslice < floor (anz / vlen) < anz / vlen
// thus naslice*vlen < anz
// also, naslice < nthreads_max, since each part will be about the same size
int naslice = GB_nthreads (anz, GB_IMAX (vlen, chunk), nthreads_max) ;
int64_t *GB_RESTRICT A_slice = NULL ; // size naslice+1
int64_t *GB_RESTRICT *Rowcounts = NULL ; // size naslice
//--------------------------------------------------------------------------
// allocate C: always non-hypersparse
//--------------------------------------------------------------------------
// The bucket transpose only works when C is not hypersparse.
// A can be hypersparse.
// [ C->p is allocated but not initialized. It is NON-hypersparse.
GrB_Info info ;
GrB_Matrix C = NULL ;
GB_CREATE (&C, ctype, A->vdim, vlen, GB_Ap_malloc, C_is_csc,
GB_FORCE_NONHYPER, A->hyper_ratio, vlen, anz, true, Context) ;
GB_OK (info) ;
int64_t *GB_RESTRICT Cp = C->p ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_CALLOC_MEMORY (Rowcounts, naslice, sizeof (int64_t *)) ;
if (Rowcounts == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
for (int taskid = 0 ; taskid < naslice ; taskid++)
{
int64_t *rowcount = NULL ;
GB_CALLOC_MEMORY (rowcount, vlen + 1, sizeof (int64_t)) ;
if (rowcount == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
Rowcounts [taskid] = rowcount ;
}
//--------------------------------------------------------------------------
// phase1: symbolic analysis
//--------------------------------------------------------------------------
// create the iterator for A
GBI_single_iterator Iter ;
if (!GB_pslice (&A_slice, /* A */ A->p, A->nvec, naslice))
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
GBI1_init (&Iter, A) ;
// sum up the row counts and find C->p
if (naslice == 1)
{
//----------------------------------------------------------------------
// A is not sliced
//----------------------------------------------------------------------
// compute the row counts of A. No need to scan the A->p pointers
int64_t *GB_RESTRICT rowcount = Rowcounts [0] ;
const int64_t *GB_RESTRICT Ai = A->i ;
for (int64_t p = 0 ; p < anz ; p++)
{
rowcount [Ai [p]]++ ;
}
// cumulative sum of the rowcount, and copy back into C->p
GB_cumsum (rowcount, vlen, (&C->nvec_nonempty), nthreads) ;
GB_memcpy (Cp, rowcount, (vlen+1) * sizeof (int64_t), nthreads) ;
}
else
{
//----------------------------------------------------------------------
// A is sliced
//----------------------------------------------------------------------
// compute the row counts of A for each slice
#define GB_PHASE_1_OF_2
#include "GB_unaryop_transpose.c"
// cumulative sum of the rowcounts across the slices
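        // After this pass, Rowcounts [taskid][i] holds the number of entries
        // in row i contributed by all tasks before taskid (an exclusive
        // prefix sum), and Cp [i] holds the total count for row i; the
        // GB_cumsum below then turns Cp into the final row pointers of C.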
int64_t i ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (i = 0 ; i < vlen ; i++)
{
int64_t s = 0 ;
for (int taskid = 0 ; taskid < naslice ; taskid++)
{
int64_t *GB_RESTRICT rowcount = Rowcounts [taskid] ;
int64_t c = rowcount [i] ;
rowcount [i] = s ;
s += c ;
}
Cp [i] = s ;
}
Cp [vlen] = 0 ;
// compute the vector pointers for C; also compute C->nvec_nonempty
GB_cumsum (Cp, vlen, &(C->nvec_nonempty), nthreads) ;
// add Cp back to all Rowcounts
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (i = 0 ; i < vlen ; i++)
{
int64_t s = Cp [i] ;
int64_t *GB_RESTRICT rowcount = Rowcounts [0] ;
rowcount [i] = s ;
for (int taskid = 1 ; taskid < naslice ; taskid++)
{
int64_t *GB_RESTRICT rowcount = Rowcounts [taskid] ;
rowcount [i] += s ;
}
}
}
C->magic = GB_MAGIC ; // C is now initialized ]
//--------------------------------------------------------------------------
// phase2: transpose A into C
//--------------------------------------------------------------------------
// transpose both the pattern and the values
if (op == NULL)
{
// do not apply an operator; optional typecast to ctype
GB_transpose_ix (C, A, Rowcounts, Iter, A_slice, naslice) ;
}
else
{
// apply an operator, C has type op->ztype
GB_transpose_op (C, op, A, Rowcounts, Iter, A_slice, naslice) ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
ASSERT_MATRIX_OK (C, "C transpose of A", GB0) ;
ASSERT (!C->is_hyper) ;
(*Chandle) = C ;
return (GrB_SUCCESS) ;
}
|
spmv.c | /***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
* SPMV: Sparse-Matrix Dense-Vector Multiplication
* Computes the product of a sparse matrix with a dense vector.
* The sparse matrix is read from file in coordinate format, converted
* to JDS format with configurable padding and alignment for different
* devices.
*
***************************************************************************/
/***************************************************************************
*
* This benchmark was adapted to run on GPUs with OpenMP 4.0 pragmas
* and OpenCL driver implemented in gpuclang 2.0 (based on clang 3.5)
*
* Marcio M Pereira <mpereira@ic.unicamp.br>
*
***************************************************************************/
/*
* === NOTE ===
*
 * The polyhedral optimizations restrict the class of loops they can
 * manipulate to sequences of imperfectly nested loops with particular
 * constraints on the loop bound and array subscript expressions.
 *
 * To enable these optimizations we fixed the problem size with the
 * __STATIC__ tag; comment it out to use the original version.
*
*/
#ifndef __STATIC__
#define __STATIC__
#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "convert_dataset.h"
#include "../../common/parboil.h"
#include "../../common/polybenchUtilFuncts.h"
#define ERROR_THRESHOLD 0.05
#define GPU 1
#ifdef __STATIC__
// Define statically the problem size
#define N 146689
#else
int N;
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
double t_start, t_end, t_start_GPU, t_end_GPU;
float *h_Ax_vector_GPU, *h_Ax_vector_CPU;
void input_vec(char *fName,float *h_vec,int dim)
{
FILE* fid = fopen(fName, "rb");
fread (h_vec, sizeof (float), dim, fid);
fclose(fid);
}
void compareResults(DATA_TYPE *A, DATA_TYPE *A_GPU)
{
int i, fail=0;
for (i=0; i < N; i++)
{
if (percentDiff(A[i], A_GPU[i]) > ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail);
}
static int generate_vector(float *x_vector, int dim)
{
srand(54321);
int i;
for(i=0;i<dim;i++)
{
x_vector[i] = (rand() / (float) RAND_MAX);
}
return 0;
}
double spmvGPU(int argc, char** argv) {
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL))
{
fprintf(stderr, "Expecting two input filenames\n");
exit(-1);
}
int len;
int depth;
int dim;
int pad = 1;
int nzcnt_len;
float *h_data;
int *h_indices;
int *h_ptr;
int *h_perm;
int *h_nzcnt;
//vector
float *h_Ax_vector;
float *h_x_vector;
int col_count;
coo_to_jds(
parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
1, // row padding
pad, // warp size
1, // pack size
1, // is mirrored?
0, // binary matrix
0, // debug level [0:2]
&h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm,
&col_count, &dim, &len, &nzcnt_len, &depth
);
h_Ax_vector=(float*)malloc(sizeof(float)*dim);
h_x_vector=(float*)malloc(sizeof(float)*dim);
input_vec( parameters->inpFiles[1],h_x_vector,dim);
#ifndef __STATIC__
N = dim;
#endif
int p, i;
t_start_GPU = rtclock();
//main execution
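  // Illustrative reading of the JDS loop below: h_nzcnt[i] is the number of
  // stored entries in (permuted) row i, h_ptr[k] is the offset of the k-th
  // jagged diagonal, so the k-th entry of row i lives at h_ptr[k] + i; the
  // result is written through h_perm to undo the row-length sort.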
#pragma omp target device(GPU) \
map(to: h_nzcnt[:nzcnt_len], h_ptr[:col_count], h_indices[:len], h_data[:len], h_perm[:col_count], h_x_vector[:N]) \
map(from: h_Ax_vector[:N])
for(p=0;p<50;p++)
{
#pragma omp parallel for
for (i = 0; i < N; i++) {
int k;
float sum = 0.0f;
int bound = h_nzcnt[i];
for(k=0;k<bound;k++ ) {
int j = h_ptr[k] + i;
int in = h_indices[j];
float d = h_data[j];
float t = h_x_vector[in];
sum += d*t;
}
h_Ax_vector[h_perm[i]] = sum;
}
}
t_end_GPU = rtclock();
h_Ax_vector_GPU = h_Ax_vector;
free (h_data);
free (h_indices);
free (h_ptr);
free (h_perm);
free (h_nzcnt);
free (h_x_vector);
pb_FreeParameters(parameters);
return t_end_GPU - t_start_GPU;
}
double spmvCPU(int argc, char** argv) {
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL))
{
fprintf(stderr, "Expecting two input filenames\n");
exit(-1);
}
int len;
int depth;
int dim;
int pad = 1;
int nzcnt_len;
float *h_data;
int *h_indices;
int *h_ptr;
int *h_perm;
int *h_nzcnt;
//vector
float *h_Ax_vector;
float *h_x_vector;
int col_count;
coo_to_jds(
parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
1, // row padding
pad, // warp size
1, // pack size
1, // is mirrored?
0, // binary matrix
0, // debug level [0:2]
&h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm,
&col_count, &dim, &len, &nzcnt_len, &depth
);
h_Ax_vector=(float*)malloc(sizeof(float)*dim);
h_x_vector=(float*)malloc(sizeof(float)*dim);
input_vec( parameters->inpFiles[1],h_x_vector,dim);
#ifndef __STATIC__
N = dim;
#endif
int p, i;
//main execution
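  // Same JDS traversal as in spmvGPU(), executed sequentially on the host.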
t_start = rtclock();
for(p=0;p<50;p++)
{
for (i = 0; i < N; i++) {
int k;
float sum = 0.0f;
int bound = h_nzcnt[i];
for(k=0;k<bound;k++ ) {
int j = h_ptr[k] + i;
int in = h_indices[j];
float d = h_data[j];
float t = h_x_vector[in];
sum += d*t;
}
h_Ax_vector[h_perm[i]] = sum;
}
}
t_end = rtclock();
h_Ax_vector_CPU = h_Ax_vector;
free (h_data);
free (h_indices);
free (h_ptr);
free (h_perm);
free (h_nzcnt);
free (h_x_vector);
pb_FreeParameters(parameters);
return t_end - t_start;
}
int main(int argc, char** argv) {
double t_GPU, t_CPU;
t_GPU = spmvGPU(argc, argv);
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_GPU);
t_CPU = spmvCPU(argc, argv);
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_CPU);
compareResults(h_Ax_vector_GPU, h_Ax_vector_CPU);
free (h_Ax_vector_GPU);
free (h_Ax_vector_CPU);
return 0;
}
|
quebra_md5.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#include <openssl/md5.h>
#include <locale.h>
char *hash;
int len;
clock_t begin;
float tempo;
int fim = 0;
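/* NOTE: 'fim' is read and written concurrently by the OpenMP threads in
   vai() without synchronization; an atomic flag (or at least volatile)
   would make the early-exit check well-defined. */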
char res[64]; /* holds the recovered string; assumes len < 64 (res[10] could overflow) */
const char charset[] = "@$#?!=+%abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
const int charset_size = sizeof(charset) - 1;
void compara_md5(char *string);
void aiQueBruto(char* str, int index);
void bruto_seq();
void vai(int nthreads);
void apd_tranquilo();
void inicia_csv();
int main(int argc, char *argv[]) {
if (argc < 3) {
fprintf(stderr, "Use: %s -hash -n_letras [-nthreads]\n", argv[0]);
exit(1);
}
hash = argv[1];
len = (int)strtol(argv[2], NULL, 10);
char *oldLocale = setlocale(LC_NUMERIC, NULL);
setlocale(LC_NUMERIC, "");
if (argc >= 4) {
int nthreads = strtol(argv[3], NULL, 10);
if (nthreads == 0) {
printf("Método utilizado: sequencial\n");
bruto_seq();
} else {
printf("Método utilizado: paralelismo com %i threads\n", nthreads);
vai(nthreads);
}
if (fim) printf("String: %s\nTime: %f\n", res, tempo);
else printf("String not found!\n");
} else {
apd_tranquilo();
}
setlocale(LC_NUMERIC, oldLocale);
return 0;
}
void compara_md5(char *string) {
unsigned char digest[MD5_DIGEST_LENGTH];
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, string, strlen(string));
MD5_Final(digest, &ctx);
char mdString[33];
for (int i = 0; i < 16; i++)
sprintf(&mdString[i*2], "%02x", (unsigned int)digest[i]);
if (strcmp(mdString, hash) == 0) {
fim = 1;
tempo = (double)(clock() - begin) / CLOCKS_PER_SEC;
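        /* Note: clock() accumulates CPU time over all threads, so for the
           OpenMP runs this can overstate elapsed time; omp_get_wtime()
           would measure wall-clock time instead. */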
strcpy(res, string);
}
}
void aiQueBruto(char* str, int index) {
for (int i = 0; i < charset_size; ++i) {
if (!fim) {
str[index] = charset[i];
if (index == len - 1) {
str[len] = '\0';
compara_md5(str);
} else aiQueBruto(str, index + 1);
}
}
}
void bruto_seq() {
begin = clock();
char *str = malloc(len + 1);
aiQueBruto(str, 0);
free(str);
}
void vai(int nthreads) {
int p = 0;
begin = clock();
omp_set_num_threads(nthreads);
#pragma omp parallel private(p)
{
#pragma omp for schedule(dynamic)
for (p = 0; p < charset_size; ++p) {
if (!fim) {
char *str = malloc(len + 1);
str[0] = charset[p];
aiQueBruto(str, 1);
free(str);
}
}
}
}
void apd_tranquilo() {
inicia_csv();
FILE *fp;
fp = fopen("resultado.csv", "a");
for (int x = 1; x <= 5; x++) {
printf("========= Execução: %i =========\n", x);
fprintf(fp, "%i", x);
bruto_seq();
fprintf(fp, ",\"%f\"", tempo);
printf("Sequencial | Tempo: %f\n", tempo);
fim = 0;
for (int y = 1; y <= 64; y = y * 2) {
vai(y);
fprintf(fp, ",\"%f\"", tempo);
printf("Threads: %2i | Tempo: %f\n", y, tempo);
fim = 0;
}
fprintf(fp, "\n");
}
printf("===============================\nString: %s\n", res);
fclose(fp);
}
void inicia_csv() {
FILE *fp;
fp = fopen("resultado.csv", "w");
fprintf(fp, ",Sequencial");
for (int y = 1; y <= 64; y = y * 2) {
fprintf(fp, ",%i", y);
}
fprintf(fp, "\n");
fclose(fp);
}
|
ic0_csc_inspector.h | //
//
#include <cstdio>
#include <vector>
#include <assert.h>
#include <set>
#undef MIN
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#undef MAX
#define MAX(x,y) ((x) > (y) ? (x) : (y))
/*
* Computes the DAG of dependency after simplification
*/
void ic0_csc_inspector(int n, int *colPtr, int *rowIdx, std::vector<std::vector<int>>& DAG){
// Inspector from r3
#pragma omp parallel for schedule(auto)
for(int i = 0 ; i <= n-1 ; i++ ){
for(int m = colPtr[i]+1 ; m < colPtr[i+1] ; m++ ){
if( i < rowIdx[m] ){
int ip = rowIdx[m];
int k = colPtr[ip];
for(int l = m ; l < colPtr[i+1] ; l++ ){
if(rowIdx[(l)] == rowIdx[(k)] && rowIdx[(l + 1)] <= rowIdx[(l)] ){
DAG[i].push_back(ip);
}
}
}
}
}
// Inspector from r22
#pragma omp parallel for schedule(auto)
for(int i = 0; i <= n-1; i++) {
for(int m = colPtr[(i)]+1; m <= colPtr[(i+1)]-1; m++) {
if (rowIdx[(m)] >= i+1 && n >= rowIdx[(m)]) {//if (rowIdx[(m)] >= i+1 && n >= rowIdx[(m)]+2) {
for(int k = colPtr[rowIdx[m]]; k <= colPtr[rowIdx[m]+1]-1; k++) {
for(int l = m; l <= colPtr[(i+1)]-1; l++) {
if (rowIdx[(l)] == rowIdx[(k)] && rowIdx[(l + 1)] <= rowIdx[(l)] ) {
int ip=rowIdx[(m)];
if (colPtr[(ip+1)]+1 >= k+1 && k >= colPtr[(ip)] ) {
DAG[i].push_back(ip);
//std::cout <<"---------- computeDAG_Test: DAG["<<i<<"]["<<ip<<"] \n";
}
}
}
}
}
}
}
/*
// Omega generated Code Generated for r22
#define rowIdx(i1,i2) rowIdx[i1]
#define rowIdx_(i1,i2,i3) rowIdx[i3]
#define rowIdx__(i1,i2,i3,i4) rowIdx[i4]
#define rowIdx___(i1,i2,i3,i4) rowIdx[i4 + 1]
#define colPtr(i1) colPtr[i1]
#define colPtr_(i1) colPtr[i1 + 1]
#define colPtr__(i,m,k,l,ip) colPtr[ip]
#define colPtr___(i1,i2,i3,i4,o1) colPtr[o1 + 1]
#define colPtr____(i1,i2) colPtr[rowIdx[i2]]
#define colPtr_____(i1,i2) colPtr[rowIdx[i2] + 1]
int t1,t2,t3,t4,t5;
#pragma omp parallel for schedule(auto)
for(t1 = 0; t1 <= n-1; t1++) {
for(t2 = colPtr(t1)+1; t2 <= colPtr_(t1)-1; t2++) {
if (rowIdx(t1,t2) >= t1+1 && n >= rowIdx(t1,t2)+2) {
for(t3 = colPtr____(t1,t2); t3 <= colPtr_____(t1,t2)-1; t3++) {
for(t4 = t2; t4 <= colPtr_(t1)-1; t4++) {
if (rowIdx__(t1,t2,t3,t4) == rowIdx_(t1,t2,t3) && rowIdx__(t1,t2,t3,t4) >= rowIdx___(t1,t2,t3,t4)) {
t5=rowIdx(t1,t2);
if (colPtr___(t1,t2,t3,t4,t5) >= t3+1 && t3 >= colPtr__(t1,t2,t3,t4,t5)+1) {
DAG[t1].push_back(t5);
}
}
}
}
}
}
}
*/
}
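/*
 * Usage sketch (illustrative, not part of the original file): for an n-by-n
 * sparse matrix stored in CSC form as (colPtr, rowIdx), DAG[i] collects the
 * columns ip > i whose IC0 update depends on column i:
 *
 *   std::vector<std::vector<int>> DAG(n);
 *   ic0_csc_inspector(n, colPtr, rowIdx, DAG);
 */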
void ic0_csc_inspector_app(int n, int *colPtr, int *rowIdx, std::vector<std::vector<int>>& DAG){
// Inspector from r3
#pragma omp parallel for schedule(auto)
for(int i = 0 ; i <= n-1 ; i++ ){
for(int m = colPtr[i]+1 ; m < colPtr[i+1] ; m++ ){
if( i < rowIdx[m] ){
int ip = rowIdx[m];
if(colPtr[ip] < colPtr[rowIdx[m] + 1] && colPtr[rowIdx[m]] < colPtr[ip]+1 ){
DAG[i].push_back(ip);
}
}
}
}
/*
// Inspector from r22
for(int i = 0 ; i <= n-1 ; i++ ){
for(int m = colPtr[i]+1 ; m < colPtr[i+1] ; m++ ){
if( i < rowIdx[m] ){
int ip = rowIdx[m];
if(colPtr[rowIdx[m]] < colPtr[rowIdx[m] + 1] && colPtr[ip] < colPtr[ip + 1] &&
colPtr[ip] < colPtr[rowIdx[m] + 1] && colPtr[rowIdx[m]] < colPtr[ip + 1] ){
DAG[i].push_back(ip);
}
}
}
}
*/
/*
// Omega generated Code Generated for r22
#define rowIdx(i,m) rowIdx[m]
#define rowIdx_(i,m,k) rowIdx[k]
#define rowIdx__(i,m,k,l) rowIdx[l]
#define rowIdx___(i1,i2,i3,i4) rowIdx[i4 + 1]
#define colPtr(i1) colPtr[i1]
#define colPtr_(i1) colPtr[i1 + 1]
#define colPtr__(i,m,k,l,ip) colPtr[ip]
#define colPtr___(i1,i2,i3,i4,o1) colPtr[o1 + 1]
#define colPtr____(i1,i2) colPtr[rowIdx[i2]]
#define colPtr_____(i1,i2) colPtr[rowIdx[i2] + 1]
int t1,t2,t3,t4,t5;
for(t1 = 0; t1 <= n-3; t1++) {
for(t2 = colPtr(t1)+1; t2 <= colPtr_(t1)-1; t2++) {
if (rowIdx(t1,t2) >= t1+1 && n >= rowIdx(t1,t2)+2) {
for(t3 = colPtr____(t1,t2); t3 <= colPtr_____(t1,t2)-1; t3++) {
for(t4 = t2; t4 <= colPtr_(t1)-1; t4++) {
if (rowIdx__(t1,t2,t3,t4) == rowIdx_(t1,t2,t3) && rowIdx__(t1,t2,t3,t4) >= rowIdx___(t1,t2,t3,t4)) {
t5=rowIdx(t1,t2);
if (colPtr___(t1,t2,t3,t4,t5) >= t3+1 && t3 >= colPtr__(t1,t2,t3,t4,t5)+1) {
DAG[t1].push_back(t5);
}
}
}
}
}
}
}
*/
}
/*
* Computes the DAG of dependency after simplification
*/
void ic0_csc_inspector(int n, int *colPtr, int *rowIdx, std::vector<std::set<int>>& DAG){
// Inspector from r3
#pragma omp parallel for schedule(auto)
for(int i = 0 ; i <= n-1 ; i++ ){
for(int m = colPtr[i]+1 ; m < colPtr[i+1] ; m++ ){
if( i < rowIdx[m] ){
int ip = rowIdx[m];
int k = colPtr[ip];
for(int l = m ; l < colPtr[i+1] ; l++ ){
if(rowIdx[(l)] == rowIdx[(k)] && rowIdx[(l + 1)] <= rowIdx[(l)] ){
DAG[i].insert(ip);
}
}
}
}
}
// Inspector from r22
#pragma omp parallel for schedule(auto)
for(int i = 0; i <= n-1; i++) {
for(int m = colPtr[(i)]+1; m <= colPtr[(i+1)]-1; m++) {
if (rowIdx[(m)] >= i+1 && n >= rowIdx[(m)]) {//if (rowIdx[(m)] >= i+1 && n >= rowIdx[(m)]+2) {
for(int k = colPtr[rowIdx[m]]; k <= colPtr[rowIdx[m]+1]-1; k++) {
for(int l = m; l <= colPtr[(i+1)]-1; l++) {
if (rowIdx[(l)] == rowIdx[(k)] && rowIdx[(l + 1)] <= rowIdx[(l)] ) {
int ip=rowIdx[(m)];
if (colPtr[(ip+1)]+1 >= k+1 && k >= colPtr[(ip)] ) {
DAG[i].insert( ip );
}
}
}
}
}
}
}
}
/*
* Computes the DAG of dependency after simplification
*/
void ic0_csc_inspector_omega(int n, int *colPtr, int *rowIdx, std::vector<std::vector<int>>& DAG){
#define colPtr(In_2) colPtr[In_2]
#define colPtr_(In_2) colPtr[In_2 + 1]
#define colPtr__(In_2, In_4, In_8, Out_2) colPtr[Out_2]
#define colPtr___(In_2, In_4) colPtr[rowIdx[In_4]]
#define colPtr____(In_2, In_4) colPtr[rowIdx[In_4] + 1]
#define rowIdx(In_2, In_4) rowIdx[In_4]
#define rowIdx_(In_2, In_4, In_8, Out_2,In_6) rowIdx[In_6]
#define rowIdx__(In_2, In_4, In_8) rowIdx[In_8]
#define rowIdx___(In_2, In_4, In_8) rowIdx[In_8 + 1]
// Omega generated Code Generated for r3
#pragma omp parallel for schedule(auto)
for(int t1 = 0; t1 <= n-1; t1++) {
for(int t2 = colPtr(t1)+1; t2 <= colPtr_(t1)-1; t2++) {
if (colPtr____(t1,t2) >= colPtr___(t1,t2)+1 && n >= rowIdx(t1,t2)+2 && rowIdx(t1,t2) >= t1+1) {
for(int t3 = t2; t3 <= colPtr_(t1)-1; t3++) {
if (rowIdx__(t1,t2,t3) >= rowIdx___(t1,t2,t3)) {
int t4=rowIdx(t1,t2);
for(int t5 = colPtr___(t1,t2); t5 <= colPtr____(t1,t2)-1; t5++) {
if (t5 == colPtr__(t1,t2,t3,t4) && rowIdx_(t1,t2,t3,t4,t5) == rowIdx__(t1,t2,t3)) {
DAG[t1].push_back(t4);
}
}
}
}
}
}
}
#undef colPtr
#undef colPtr_
#undef colPtr__
#undef colPtr___
#undef colPtr____
#undef rowIdx
#undef rowIdx_
#undef rowIdx__
#undef rowIdx___
// Omega generated Code Generated for r22
#define rowIdx(i1,i2) rowIdx[i1]
#define rowIdx_(i1,i2,i3) rowIdx[i3]
#define rowIdx__(i1,i2,i3,i4) rowIdx[i4]
#define rowIdx___(i1,i2,i3,i4) rowIdx[i4 + 1]
#define colPtr(i1) colPtr[i1]
#define colPtr_(i1) colPtr[i1 + 1]
#define colPtr__(i,m,k,l,ip) colPtr[ip]
#define colPtr___(i1,i2,i3,i4,o1) colPtr[o1 + 1]
#define colPtr____(i1,i2) colPtr[rowIdx[i2]]
#define colPtr_____(i1,i2) colPtr[rowIdx[i2] + 1]
#pragma omp parallel for schedule(auto)
for(int t1 = 0; t1 <= n-1; t1++) {
for(int t2 = colPtr(t1)+1; t2 <= colPtr_(t1)-1; t2++) {
if (rowIdx(t1,t2) >= t1+1 && n >= rowIdx(t1,t2)+2) {
for(int t3 = colPtr____(t1,t2); t3 <= colPtr_____(t1,t2)-1; t3++) {
for(int t4 = t2; t4 <= colPtr_(t1)-1; t4++) {
if (rowIdx__(t1,t2,t3,t4) == rowIdx_(t1,t2,t3) && rowIdx__(t1,t2,t3,t4) >= rowIdx___(t1,t2,t3,t4)) {
int t5=rowIdx(t1,t2);
if (colPtr___(t1,t2,t3,t4,t5) >= t3+1 && t3 >= colPtr__(t1,t2,t3,t4,t5)+1) {
DAG[t1].push_back(t5);
}
}
}
}
}
}
}
}
|
attention.c | #include "darknet.h"
#include <sys/time.h>
#include <assert.h>
void extend_data_truth(data *d, int n, float val) {
int i, j;
for (i = 0; i < d->y.rows; ++i) {
d->y.vals[i] = realloc(d->y.vals[i], (d->y.cols + n) * sizeof(float));
for (j = 0; j < n; ++j) {
d->y.vals[i][d->y.cols + j] = val;
}
}
d->y.cols += n;
}
matrix network_loss_data(network *net, data test) {
int i, b;
int k = 1;
matrix pred = make_matrix(test.X.rows, k);
float *X = calloc(net->batch * test.X.cols, sizeof(float));
float *y = calloc(net->batch * test.y.cols, sizeof(float));
for (i = 0; i < test.X.rows; i += net->batch) {
for (b = 0; b < net->batch; ++b) {
if (i + b == test.X.rows) break;
memcpy(X + b * test.X.cols, test.X.vals[i + b], test.X.cols * sizeof(float));
memcpy(y + b * test.y.cols, test.y.vals[i + b], test.y.cols * sizeof(float));
}
network orig = *net;
net->input = X;
net->truth = y;
net->train = 0;
net->delta = 0;
forward_network(net);
*net = orig;
float *delta = net->layers[net->n - 1].output;
for (b = 0; b < net->batch; ++b) {
if (i + b == test.X.rows) break;
int t = max_index(y + b * test.y.cols, 1000);
float err = sum_array(delta + b * net->outputs, net->outputs);
pred.vals[i + b][0] = -err;
//pred.vals[i+b][0] = 1-delta[b*net->outputs + t];
}
}
free(X);
free(y);
return pred;
}
void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear) {
int i, j;
float avg_cls_loss = -1;
float avg_att_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
network **nets = calloc(ngpus, sizeof(network * ));
srand(time(0));
int seed = rand();
for (i = 0; i < ngpus; ++i) {
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *train_list = option_find_str(options, "train", "data/train.list");
int classes = option_find_int(options, "classes", 2);
char **labels = get_labels(label_list);
list *plist = get_paths(train_list);
char **paths = (char **) list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
double time;
int divs = 3;
int size = 2;
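    // The input is split into a divs x divs grid of tiles, each scaled by
    // 'size'; the per-tile losses computed via network_loss_data() below are
    // used to pick the most informative tile for classification training.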
load_args args = {0};
args.w = divs * net->w / size;
args.h = divs * net->h / size;
args.size = divs * net->w / size;
args.threads = 32;
args.hierarchy = net->hierarchy;
args.min = net->min_ratio * args.w;
args.max = net->max_ratio * args.w;
args.angle = net->angle;
args.aspect = net->aspect;
args.exposure = net->exposure;
args.saturation = net->saturation;
args.hue = net->hue;
args.paths = paths;
args.classes = classes;
args.n = imgs;
args.m = N;
args.labels = labels;
args.type = CLASSIFICATION_DATA;
data train;
data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
int epoch = (*net->seen) / N;
while (get_current_batch(net) < net->max_batches || net->max_batches == 0) {
time = what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
data resized = resize_data(train, net->w, net->h);
extend_data_truth(&resized, divs * divs, 0);
data *tiles = tile_data(train, divs, size);
printf("Loaded: %lf seconds\n", what_time_is_it_now() - time);
time = what_time_is_it_now();
float aloss = 0;
float closs = 0;
int z;
for (i = 0; i < divs * divs / ngpus; ++i) {
#pragma omp parallel for
for (j = 0; j < ngpus; ++j) {
int index = i * ngpus + j;
extend_data_truth(tiles + index, divs * divs, SECRET_NUM);
matrix deltas = network_loss_data(nets[j], tiles[index]);
for (z = 0; z < resized.y.rows; ++z) {
resized.y.vals[z][train.y.cols + index] = deltas.vals[z][0];
}
free_matrix(deltas);
}
}
int *inds = calloc(resized.y.rows, sizeof(int));
for (z = 0; z < resized.y.rows; ++z) {
int index = max_index(resized.y.vals[z] + train.y.cols, divs * divs);
inds[z] = index;
for (i = 0; i < divs * divs; ++i) {
resized.y.vals[z][train.y.cols + i] = (i == index) ? 1 : 0;
}
}
data best = select_data(tiles, inds);
free(inds);
#ifdef GPU
if (ngpus == 1) {
closs = train_network(net, best);
} else {
closs = train_networks(nets, ngpus, best, 4);
}
#endif
for (i = 0; i < divs * divs; ++i) {
printf("%.2f ", resized.y.vals[0][train.y.cols + i]);
if ((i + 1) % divs == 0) printf("\n");
free_data(tiles[i]);
}
free_data(best);
printf("\n");
image im = float_to_image(64, 64, 3, resized.X.vals[0]);
//show_image(im, "orig");
//cvWaitKey(100);
/*
image im1 = float_to_image(64,64,3,tiles[i].X.vals[0]);
image im2 = float_to_image(64,64,3,resized.X.vals[0]);
show_image(im1, "tile");
show_image(im2, "res");
*/
#ifdef GPU
if (ngpus == 1) {
aloss = train_network(net, resized);
} else {
aloss = train_networks(nets, ngpus, resized, 4);
}
#endif
for (i = 0; i < divs * divs; ++i) {
printf("%f ", nets[0]->output[1000 + i]);
if ((i + 1) % divs == 0) printf("\n");
}
printf("\n");
free_data(resized);
free_data(train);
if (avg_cls_loss == -1) avg_cls_loss = closs;
if (avg_att_loss == -1) avg_att_loss = aloss;
avg_cls_loss = avg_cls_loss * .9 + closs * .1;
avg_att_loss = avg_att_loss * .9 + aloss * .1;
printf("%ld, %.3f: Att: %f, %f avg, Class: %f, %f avg, %f rate, %lf seconds, %ld images\n",
get_current_batch(net), (float) (*net->seen) / N, aloss, avg_att_loss, closs, avg_cls_loss,
get_current_rate(net), what_time_is_it_now() - time, *net->seen);
if (*net->seen / N > epoch) {
epoch = *net->seen / N;
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, epoch);
save_weights(net, buff);
}
if (get_current_batch(net) % 1000 == 0) {
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
}
char buff[256];
sprintf(buff, "%s/%s.weights", backup_directory, base);
save_weights(net, buff);
pthread_join(load_thread, 0);
free_network(net);
free_ptrs((void **) labels, classes);
free_ptrs((void **) paths, plist->size);
free_list(plist);
free(base);
}
void validate_attention_single(char *datacfg, char *filename, char *weightfile) {
int i, j;
network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *leaf_list = option_find_str(options, "leaves", 0);
if (leaf_list) change_leaves(net->hierarchy, leaf_list);
char *valid_list = option_find_str(options, "valid", "data/train.list");
int classes = option_find_int(options, "classes", 2);
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
char **paths = (char **) list_to_array(plist);
int m = plist->size;
free_list(plist);
float avg_acc = 0;
float avg_topk = 0;
int *indexes = calloc(topk, sizeof(int));
int divs = 4;
int size = 2;
int extra = 0;
float *avgs = calloc(classes, sizeof(float));
int *inds = calloc(divs * divs, sizeof(int));
for (i = 0; i < m; ++i) {
int class = -1;
char *path = paths[i];
for (j = 0; j < classes; ++j) {
if (strstr(path, labels[j])) {
class = j;
break;
}
}
image im = load_image_color(paths[i], 0, 0);
image resized = resize_min(im, net->w * divs / size);
image crop = crop_image(resized, (resized.w - net->w * divs / size) / 2, (resized.h - net->h * divs / size) / 2,
net->w * divs / size, net->h * divs / size);
image rcrop = resize_image(crop, net->w, net->h);
//show_image(im, "orig");
//show_image(crop, "cropped");
//cvWaitKey(0);
float *pred = network_predict(net, rcrop.data);
//pred[classes + 56] = 0;
for (j = 0; j < divs * divs; ++j) {
printf("%.2f ", pred[classes + j]);
if ((j + 1) % divs == 0) printf("\n");
}
printf("\n");
copy_cpu(classes, pred, 1, avgs, 1);
top_k(pred + classes, divs * divs, divs * divs, inds);
show_image(crop, "crop");
for (j = 0; j < extra; ++j) {
int index = inds[j];
int row = index / divs;
int col = index % divs;
int y = row * crop.h / divs - (net->h - crop.h / divs) / 2;
int x = col * crop.w / divs - (net->w - crop.w / divs) / 2;
printf("%d %d %d %d\n", row, col, y, x);
image tile = crop_image(crop, x, y, net->w, net->h);
float *pred = network_predict(net, tile.data);
axpy_cpu(classes, 1., pred, 1, avgs, 1);
show_image(tile, "tile");
//cvWaitKey(10);
}
if (net->hierarchy) hierarchy_predictions(pred, net->outputs, net->hierarchy, 1, 1);
if (rcrop.data != resized.data) free_image(rcrop);
if (resized.data != im.data) free_image(resized);
free_image(im);
free_image(crop);
top_k(pred, classes, topk, indexes);
if (indexes[0] == class) avg_acc += 1;
for (j = 0; j < topk; ++j) {
if (indexes[j] == class) avg_topk += 1;
}
printf("%d: top 1: %f, top %d: %f\n", i, avg_acc / (i + 1), topk, avg_topk / (i + 1));
}
}
void validate_attention_multi(char *datacfg, char *filename, char *weightfile) {
int i, j;
network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
int classes = option_find_int(options, "classes", 2);
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
int scales[] = {224, 288, 320, 352, 384};
int nscales = sizeof(scales) / sizeof(scales[0]);
char **paths = (char **) list_to_array(plist);
int m = plist->size;
free_list(plist);
float avg_acc = 0;
float avg_topk = 0;
int *indexes = calloc(topk, sizeof(int));
for (i = 0; i < m; ++i) {
int class = -1;
char *path = paths[i];
for (j = 0; j < classes; ++j) {
if (strstr(path, labels[j])) {
class = j;
break;
}
}
float *pred = calloc(classes, sizeof(float));
image im = load_image_color(paths[i], 0, 0);
for (j = 0; j < nscales; ++j) {
image r = resize_min(im, scales[j]);
resize_network(net, r.w, r.h);
float *p = network_predict(net, r.data);
if (net->hierarchy) hierarchy_predictions(p, net->outputs, net->hierarchy, 1, 1);
axpy_cpu(classes, 1, p, 1, pred, 1);
flip_image(r);
p = network_predict(net, r.data);
axpy_cpu(classes, 1, p, 1, pred, 1);
if (r.data != im.data) free_image(r);
}
free_image(im);
top_k(pred, classes, topk, indexes);
free(pred);
if (indexes[0] == class) avg_acc += 1;
for (j = 0; j < topk; ++j) {
if (indexes[j] == class) avg_topk += 1;
}
printf("%d: top 1: %f, top %d: %f\n", i, avg_acc / (i + 1), topk, avg_topk / (i + 1));
}
}
void predict_attention(char *datacfg, char *cfgfile, char *weightfile, char *filename, int top) {
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", 0);
if (!name_list) name_list = option_find_str(options, "labels", "data/labels.list");
if (top == 0) top = option_find_int(options, "top", 1);
int i = 0;
char **names = get_labels(name_list);
clock_t time;
int *indexes = calloc(top, sizeof(int));
char buff[256];
char *input = buff;
while (1) {
if (filename) {
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if (!input) return;
strtok(input, "\n");
}
image im = load_image_color(input, 0, 0);
image r = letterbox_image(im, net->w, net->h);
//resize_network(&net, r.w, r.h);
//printf("%d %d\n", r.w, r.h);
float *X = r.data;
time = clock();
float *predictions = network_predict(net, X);
if (net->hierarchy) hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1);
top_k(predictions, net->outputs, top, indexes);
fprintf(stderr, "%s: Predicted in %f seconds.\n", input, sec(clock() - time));
for (i = 0; i < top; ++i) {
int index = indexes[i];
//if(net->hierarchy) printf("%d, %s: %f, parent: %s \n",index, names[index], predictions[index], (net->hierarchy->parent[index] >= 0) ? names[net->hierarchy->parent[index]] : "Root");
//else printf("%s: %f\n",names[index], predictions[index]);
printf("%5.2f%%: %s\n", predictions[index] * 100, names[index]);
}
if (r.data != im.data) free_image(r);
free_image(im);
if (filename) break;
}
}
void run_attention(int argc, char **argv) {
if (argc < 4) {
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
int ngpus;
int *gpus = read_intlist(gpu_list, &ngpus, gpu_index);
int top = find_int_arg(argc, argv, "-t", 0);
int clear = find_arg(argc, argv, "-clear");
char *data = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6] : 0;
char *layer_s = (argc > 7) ? argv[7] : 0;
if (0 == strcmp(argv[2], "predict")) predict_attention(data, cfg, weights, filename, top);
else if (0 == strcmp(argv[2], "train")) train_attention(data, cfg, weights, gpus, ngpus, clear);
else if (0 == strcmp(argv[2], "valid")) validate_attention_single(data, cfg, weights);
else if (0 == strcmp(argv[2], "validmulti")) validate_attention_multi(data, cfg, weights);
}
|
oskar_dftw_c2c_3d_omp.c | /*
* Copyright (c) 2013, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "math/oskar_dftw_c2c_3d_omp.h"
#include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Single precision. */
void oskar_dftw_c2c_3d_omp_f(const int n_in, const float wavenumber,
const float* x_in, const float* y_in, const float* z_in,
const float2* weights_in, const int n_out, const float* x_out,
const float* y_out, const float* z_out, const float2* data,
float2* output)
{
int i_out = 0;
/* Loop over output points. */
#pragma omp parallel for private(i_out)
for (i_out = 0; i_out < n_out; ++i_out)
{
int i;
float xp_out, yp_out, zp_out;
float2 out;
/* Clear output value. */
out.x = 0.0f;
out.y = 0.0f;
/* Get the output position. */
xp_out = wavenumber * x_out[i_out];
yp_out = wavenumber * y_out[i_out];
zp_out = wavenumber * z_out[i_out];
/* Loop over input points. */
for (i = 0; i < n_in; ++i)
{
float2 temp, w;
float a;
/* Calculate the phase for the output position. */
a = xp_out * x_in[i] + yp_out * y_in[i] + zp_out * z_in[i];
temp.x = cosf(a);
temp.y = sinf(a);
/* Multiply the supplied DFT weight by the computed phase. */
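            /* In-place complex multiply w <- w * temp; 'a' saves the
               original real part of w before it is overwritten. */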
w = weights_in[i];
a = w.x;
w.x *= temp.x;
w.x -= w.y * temp.y;
w.y *= temp.x;
w.y += a * temp.y;
/* Perform complex multiply-accumulate. */
temp = data[i * n_out + i_out];
out.x += w.x * temp.x;
out.x -= w.y * temp.y;
out.y += w.y * temp.x;
out.y += w.x * temp.y;
}
/* Store the output point. */
output[i_out] = out;
}
}
/* Double precision. */
void oskar_dftw_c2c_3d_omp_d(const int n_in, const double wavenumber,
const double* x_in, const double* y_in, const double* z_in,
const double2* weights_in, const int n_out, const double* x_out,
const double* y_out, const double* z_out, const double2* data,
double2* output)
{
int i_out = 0;
/* Loop over output points. */
#pragma omp parallel for private(i_out)
for (i_out = 0; i_out < n_out; ++i_out)
{
int i;
double xp_out, yp_out, zp_out;
double2 out;
/* Clear output value. */
out.x = 0.0;
out.y = 0.0;
/* Get the output position. */
xp_out = wavenumber * x_out[i_out];
yp_out = wavenumber * y_out[i_out];
zp_out = wavenumber * z_out[i_out];
/* Loop over input points. */
for (i = 0; i < n_in; ++i)
{
double2 temp, w;
double a;
/* Calculate the phase for the output position. */
a = xp_out * x_in[i] + yp_out * y_in[i] + zp_out * z_in[i];
temp.x = cos(a);
temp.y = sin(a);
/* Multiply the supplied DFT weight by the computed phase. */
w = weights_in[i];
a = w.x;
w.x *= temp.x;
w.x -= w.y * temp.y;
w.y *= temp.x;
w.y += a * temp.y;
/* Perform complex multiply-accumulate. */
temp = data[i * n_out + i_out];
out.x += w.x * temp.x;
out.x -= w.y * temp.y;
out.y += w.y * temp.x;
out.y += w.x * temp.y;
}
/* Store the output point. */
output[i_out] = out;
}
}
#ifdef __cplusplus
}
#endif
|
conv_direct_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "convolution_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left, int8_t v)
{
int8_t* ptr = input;
int8_t* outptr = output;
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
// fill center
for (; y < (top + in_h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (in_w < 12)
{
for (; x < (left + in_w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, (size_t)in_w * sizeof(int8_t));
x += in_w;
}
for (; x < out_w; x++)
{
outptr[x] = v;
}
ptr += in_w;
outptr += out_w;
}
// fill bottom
for (; y < out_h; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
}
static int conv3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = output_tensor->data;
int8_t* input_int8 = input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = bias_tensor->data;
/* get scale values for quantization */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = weight_tensor->data;
/* padding */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
/* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
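/*
 * A minimal sketch (not called by the kernels in this file, provided only to
 * spell out the per-channel requantization pipeline they implement):
 * dequantize the int32 accumulator with input_scale * kernel_scale, apply
 * the optional ReLU (activation == 0) or ReLU6 (activation > 0) clamp, then
 * requantize by output_scale with saturation to [-127, 127].
 */
static inline int8_t requant_int8_sketch(int32_t acc, int32_t bias,
                                         float input_scale, float kernel_scale,
                                         float output_scale, int activation)
{
    float v = (float)(acc + bias) * input_scale * kernel_scale;
    if (activation == 0 && v < 0) /* ReLU */
        v = 0;
    if (activation > 0) /* ReLU6 */
    {
        if (v < 0) v = 0;
        if (v > 6) v = 6;
    }
    int32_t q = (int32_t)round(v / output_scale);
    if (q > 127)
        q = 127;
    else if (q < -127)
        q = -127;
    return (int8_t)q;
}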
static int conv3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
int inch = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_hw = inh * inw;
int outch = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_size = output_tensor->elem_num;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
memset(output_int32, 0, out_size * sizeof(int32_t));
float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
int8_t* output_int8 = output_tensor->data;
int8_t* input_int8 = input_tensor->data;
int32_t* bias_int32 = NULL;
if(bias_tensor)
bias_int32 = bias_tensor->data;
/* get scale value of quantization */
float input_scale = input_tensor->scale;
float* kernel_scales = weight_tensor->scale_list;
float output_scale = output_tensor->scale;
const signed char* kernel = weight_tensor->data;
/* padding */
int inh_tmp = inh + pad_h + pad_h;
int inw_tmp = inw + pad_w + pad_w;
int8_t* input_tmp = NULL;
if (inh_tmp == inh && inw_tmp == inw)
input_tmp = input_int8;
else
{
input_tmp = ( int8_t* )sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < inch; g++)
{
int8_t* pad_in = input_int8 + g * inh * inw;
int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
}
}
int tailstep = inw_tmp - 2 * outw + inw_tmp;
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
int32_t* out0 = output_int32 + p * out_hw;
int8_t* kernel0 = (int8_t* )kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
int8_t* r0 = img0;
int8_t* r1 = img0 + inw_tmp;
int8_t* r2 = img0 + inw_tmp * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += ( int )r0[0] * kernel0[0];
sum0 += ( int )r0[1] * kernel0[1];
sum0 += ( int )r0[2] * kernel0[2];
sum0 += ( int )r1[0] * kernel0[3];
sum0 += ( int )r1[1] * kernel0[4];
sum0 += ( int )r1[2] * kernel0[5];
sum0 += ( int )r2[0] * kernel0[6];
sum0 += ( int )r2[1] * kernel0[7];
sum0 += ( int )r2[2] * kernel0[8];
*outptr0 += sum0;
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
/* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (bias_tensor)
output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
else
output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
}
}
/* process activation relu */
if (param->activation == 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
}
}
}
/* process activation relu6 */
if (param->activation > 0)
{
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
if (output_fp32[output_off] < 0)
output_fp32[output_off] = 0;
if (output_fp32[output_off] > 6)
output_fp32[output_off] = 6;
}
}
}
/* quant from fp32 to int8 */
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < outch; i++)
{
for (int j = 0; j < outh * outw; j++)
{
int output_off = i * (outh * outw) + j;
int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_int8[output_off] = (int8_t)data_i32;
}
}
sys_free(output_int32);
sys_free(output_fp32);
if (!(inh_tmp == inh && inw_tmp == inw))
sys_free(input_tmp);
return 0;
}
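/*
 * Shape arithmetic assumed by both kernels above (the standard 3x3
 * convolution rule, stated here for illustration, not enforced by this file):
 *     outh = (inh + 2*pad_h - 3) / stride + 1
 *     outw = (inw + 2*pad_w - 3) / stride + 1
 * For stride 2 this is what makes
 *     tailstep = inw_tmp - 2*outw + inw_tmp
 * advance r0/r1/r2 from the end of one output row to the start of the next
 * input row pair, since 2*outw + tailstep == 2*inw_tmp.
 */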
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* weight_tensor;
struct tensor* bias_tensor = NULL;
struct tensor* output_tensor = NULL;
int num_thread = exec_graph->num_thread;
/* set the input data and shape again, in case of reshape or dynamic shape */
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
if (ir_node->input_num > 2)
bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct conv_param* conv_param = ( struct conv_param* )ir_node->op.param_mem;
int ret = -1;
switch(conv_param->stride_h)
{
case 1:
ret = conv3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
break;
case 2:
ret = conv3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
break;
default:
TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", conv_param->stride_h);
}
return ret;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
struct conv_param* param = ( struct conv_param* )exec_node->op.param_mem;
struct node* ir_node = exec_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
int group = param->group;
int kernel_h = param->kernel_h;
int kernel_w = param->kernel_w;
int stride_h = param->stride_h;
int stride_w = param->stride_w;
int dilation_h = param->dilation_h;
int dilation_w = param->dilation_w;
int pad_h0 = param->pad_h0;
int pad_w0 = param->pad_w0;
int pad_h1 = param->pad_h1;
int pad_w1 = param->pad_w1;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
/* only support int8 */
if (input_tensor->data_type != TENGINE_DT_INT8)
return 0;
if (group == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3 &&
((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
return OPS_SCORE_BEST * 2;
else
return 0;
}
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_conv_direct_hcl_x86_op()
{
return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}
int unregister_conv_direct_hcl_x86_op()
{
unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
return 0;
}
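/*
 * Illustrative note: the runtime is expected to call score() for every
 * node_ops registered on OP_CONV and select the highest scorer, so returning
 * OPS_SCORE_BEST * 2 above makes this direct int8 3x3 kernel win over generic
 * implementations whenever its constraints (group 1, symmetric padding, no
 * dilation, stride 1 or 2) are met.
 */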
|
GB_binop__band_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__band_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint32)
// A*D function (colscale): GB (_AxD__band_uint32)
// D*A function (rowscale): GB (_DxB__band_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__band_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__band_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint32)
// C=scalar+B GB (_bind1st__band_uint32)
// C=scalar+B' GB (_bind1st_tran__band_uint32)
// C=A+scalar GB (_bind2nd__band_uint32)
// C=A'+scalar GB (_bind2nd_tran__band_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT32 || GxB_NO_BAND_UINT32)
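//------------------------------------------------------------------------------
// illustrative sketch (not generated code): how the macros above combine
//------------------------------------------------------------------------------
#if 0
// A minimal sketch, assuming dense arrays Ax, Bx, Cx of length n; the real
// work is done by the included templates below, which expand these same
// macros over the sparse/hyper/bitmap/full cases.
static void band_uint32_sketch (uint32_t *Cx, const uint32_t *Ax,
    const uint32_t *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        GB_GETA (aij, Ax, p) ;                  // uint32_t aij = Ax [p]
        GB_GETB (bij, Bx, p) ;                  // uint32_t bij = Bx [p]
        GB_BINOP (GB_CX (p), aij, bij, 0, 0) ;  // Cx [p] = aij & bij
    }
}
#endif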
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__band_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__band_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__band_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__band_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__band_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = Bx [p] ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__band_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = Ax [p] ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
GrB_Info GB (_bind1st_tran__band_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
GrB_Info GB (_bind2nd_tran__band_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
opencl_7z_fmt_plug.c | /*
* This software is Copyright (c) 2015 magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sevenzip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sevenzip);
#else
#include <string.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "crc32.h"
#include "stdint.h"
#include "unicode.h"
#include "memdbg.h"
#define FORMAT_LABEL "7z-opencl"
#define FORMAT_NAME "7-Zip"
#define FORMAT_TAG "$7z$"
#define TAG_LENGTH 4
#define ALGORITHM_NAME "SHA256 OPENCL AES"
#define BENCHMARK_COMMENT " (512K iterations)"
#define BENCHMARK_LENGTH 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define PLAINTEXT_LENGTH ((55-8)/2) // 23, rar3 uses 22
#define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH)
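/* Assumption (for illustration): each KDF iteration hashes the UTF-16
   password plus an 8-byte counter; (55-8)/2 keeps that message within a
   single 64-byte SHA-256 block (55 bytes of data plus padding). */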
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#define BIG_ENOUGH (8192 * 32)
typedef struct {
uint32_t length;
uint16_t v[PLAINTEXT_LENGTH];
} sevenzip_password;
typedef struct {
uint round;
uint8_t key[32];
} sevenzip_hash;
typedef struct {
uint32_t length;
uint32_t iterations;
uint8_t salt[16];
} sevenzip_salt;
typedef struct {
cl_uint total[2];
cl_uint state[8];
cl_uchar buffer[64];
} SHA256_CTX;
typedef struct {
cl_ulong t;
SHA256_CTX ctx;
cl_uint len;
cl_ushort buffer[PLAINTEXT_LENGTH];
} sevenzip_state;
static int *cracked;
static int any_cracked;
static int new_keys;
static struct custom_salt {
int NumCyclesPower;
int SaltSize;
int ivSize;
int type;
unsigned char data[BIG_ENOUGH];
unsigned char iv[16];
unsigned char salt[16];
unsigned int crc;
int length; /* used in decryption */
int unpacksize; /* used in CRC calculation */
} *cur_salt;
static struct fmt_tests sevenzip_tests[] = {
/* CRC checks passes for these hashes */
{"$7z$0$19$0$1122$8$d1f50227759415890000000000000000$1412385885$112$112$5e5b8b734adf52a64c541a5a5369023d7cccb78bd910c0092535dfb013a5df84ac692c5311d2e7bbdc580f5b867f7b5dd43830f7b4f37e41c7277e228fb92a6dd854a31646ad117654182253706dae0c069d3f4ce46121d52b6f20741a0bb39fc61113ce14d22f9184adafd6b5333fb1", "password"},
{"$7z$0$19$0$1122$8$a264c94f2cd72bec0000000000000000$725883103$112$108$64749c0963e20c74602379ca740165b9511204619859d1914819bc427b7e5f0f8fc67f53a0b53c114f6fcf4542a28e4a9d3914b4bc76baaa616d6a7ec9efc3f051cb330b682691193e6fa48159208329460c3025fb273232b82450645f2c12a9ea38b53a2331a1d0858813c8bf25a831", "openwall"},
/* padding check passes for these hashes */
{"$7z$0$19$0$1122$8$732b59fd26896e410000000000000000$2955316379$192$183$7544a3a7ec3eb99a33d80e57907e28fb8d0e140ec85123cf90740900429136dcc8ba0692b7e356a4d4e30062da546a66b92ec04c64c0e85b22e3c9a823abef0b57e8d7b8564760611442ecceb2ca723033766d9f7c848e5d234ca6c7863a2683f38d4605322320765938049305655f7fb0ad44d8781fec1bf7a2cb3843f269c6aca757e509577b5592b60b8977577c20aef4f990d2cb665de948004f16da9bf5507bf27b60805f16a9fcc4983208297d3affc4455ca44f9947221216f58c337f", "password"},
/* unsupported hashes; these will require the validFolder check */
// {"$7z$0$19$0$1122$8$5fdbec1569ff58060000000000000000$2465353234$112$112$58ba7606aafc7918e3db7f6e0920f410f61f01e9c1533c40850992fee4c5e5215bc6b4ea145313d0ac065b8ec5b47d9fb895bb7f97609be46107d71e219544cfd24b52c2ecd65477f72c466915dcd71b80782b1ac46678ab7f437fd9f7b8e9d9fad54281d252de2a7ae386a65fc69eda", "password"},
#if DEBUG
{"$7z$0$19$0$1122$8$94fb9024fdd3e6c40000000000000000$3965424295$112$99$1127828817ff126bc45ff3c5225d9d0c5d00a52094909674e6ed3dc431546d9a672738f2fa07556340d604d2efd2901b9d2ac2c0686c25af9c520c137b16c50c54df8703fd0b0606fa721ad70aafb9c4e3b288ef49864e6034021969b4ce11e3b8e269a92090ccf593c6a0da06262116", ""},
{"$7z$0$19$0$1122$8$6fd059d516d5490f0000000000000000$460747259$112$99$af163eb5532c557efca78fbb448aa04f348cd258c94233e6669f4e5025f220274c244d4f2347a7512571d9b6015a1e1a90e281983b743da957437b33092eddb55a5bc76f3ab6c7dbabb001578d1043285f5fa791fd94dd9779b461e44cbfe869f891007335b766774ccee3813ec8cd57", "&"},
{"$7z$0$19$0$1122$8$6d4a12af68d83bfe0000000000000000$993697592$112$99$7c308faa36b667599ee4418435ab621884c5c115ee3b70be454fe99236422f4f2d5cd9c8fcfbe6b6b0805ee602ce8488a08f7ea14a4f5c0c060fc685bff187720a402b23a5cfe3c9c5a5ae07f91209031b8f9804ac10459e15a0158031f6c58e507401ec6e1e6de8f64d94201159432b", "&'"},
{"$7z$0$19$0$1122$8$7527d758a59181830000000000000000$3917710544$112$99$61a9ca9e835bd0f2dc474b34d5d89bcf8cd1bb071a984ee1dcf224174a60bcee140fcf2fde8927fe4f3f4eb4a2cc39faff73f1898ae25cc92bd02939f4317ebb173bf3b6f01eef183163ddd533ad5c076f87341bd8b86d8460c68fc390aa8df89fc4076bdfd24e157f6c07e105c07612", "&'("},
{"$7z$0$19$0$1122$8$68928bade860a2b80000000000000000$3235890186$112$99$4b685a569c3aed78d217bae9ec64fa06b614df55c1cb0d160563d87efe38813accb38dd7037f86cebc91751c2488769c7398dfefaf491c024f2d640dcb388a56404cd5ac475ba16b5f8206fa45d5923b3a0c8dd0f24460ccee0d93bea03ad58b8a8db502a55ba1775560b3d194f342f7", "&'()"},
{"$7z$0$19$0$1122$8$81931b9ba0b069820000000000000000$3094344848$112$99$fdbb2622143d25b13992b1467ce9edce4e3df8ca07535735b76e8abcb0791e384a1d5547483e19c3bd6e5a0742d29c403cfc8b3a003b285e80b350ea9157600eb91c49b329903de9ec9b17d1c95b0e136b579e165a6e80550464fa99830bfd9ee58fc14516b614ff9f84ec80e6880a36", "&'()*"},
{"$7z$0$19$0$1122$8$ccf696913989510d0000000000000000$1238556212$112$99$647264fbc665e73ecfe3ef7055fef0d91cb86833d6df08b2f7a3c1c89cf7cdaa09a802c8bfb2e5c6b55143a315df74d841b349fc8b43613d0f87cc90325fd56fc17ee08df7ce76cdc9cda61bd4d5632e20af3db16e921c755174f291c0aa6581844def4547380e2dd4a574435d17e1e8", "&'()*+"},
{"$7z$0$19$0$1122$8$d618bd3ec8bafd800000000000000000$1349785$112$99$6514e2e7468e6f0ed63796cfc0588ac2d75f024c4a0fa03778bd252d316d03e48a08ffcc0011725ad4f867e9a9666630dff4f352c59bcbadb94b9d0e2c42d653b80f480005ce868a0b1a075b2e00abd743de0867d69cdc8b56c7f9770537d50e6bb11eb0d2d7d8b6af5dd8ecb50ab553", "&'()*+,"},
{"$7z$0$19$0$1122$8$1c1586d191f190890000000000000000$642253888$112$99$f55cf9ab802b10a83471abe9319711ae79906cd6921365167c389470a3a8a72b0d877379daae2c24ea2258e8586f12d5036aff9ddc8e26861467b0843ffb72e4410c2be76ec111d37f875c81b244ed172f1f4765a220d830a9615787e9d07f8582146556e9c566b64897a47d18a82b36", "&'()*+,-"},
{"$7z$0$19$0$1122$8$0df03cbdbc73e22a0000000000000000$3194757927$112$99$df53e9d8b4e02cf2962ad87912021508a36910c399a7abc4a3a5423fa2184816af7172418eb4763924ec8b099b7ca95abdc6faac9aaa6e181ffa60b7e8bdb2bf576536ca69152e3b6b97302c796bbc9dec78db6ba7a4a58e68f8ee28f27dea26bd4f848dc3a3315e97e1463b5c171ce5", "&'()*+,-."},
{"$7z$0$19$0$1122$8$7785351cf9fe5dfa0000000000000000$1304801610$112$99$7b35280384726da8521fee0786ef43e0aa621394a6f015b65cbd7f1329f43c4543b8a451a0007c03a3ce3f61e639c54ede3e580600b113777822b6d562390d14ed236e5bac3d3af63ae23015148a95e7ccbc9eea653b52c606ca09ec51fd2b0c4cfc2b760fccc1fe0ccdd9ee3fcb8129", "&'()*+,-./"},
{"$7z$0$19$0$1122$8$70eb7f4b821cf5310000000000000000$3381356868$112$99$c26db2cb89df1237f323d92044726d03cfc7ba83115e789243c3b2570ae674d8356a23e004b103638b1ea9fe6ff5db844a1ddcaaed8a71a8d8e343f73868b4acafd34d493345439b0e0be87d2cf52eb4cceaafcff0dfaf9cf25080693ede267460320e1282b869a5f0b6c8789e769640", "&'()*+,-./0"},
{"$7z$0$19$0$1122$8$2ac0f1307794d8e10000000000000000$2871514580$112$99$4783d91fa72c377310654e961120e71ecdd27ec2e67366e83291daefcea03514ca9ecea031fcbd25c0759c1f242219e673cee093ef361664f18dacf85ca0620fd7092477ceeff7c548df0a475ce93278a564fe4ddb4ee2e4695cbe417a792e822204390ca5a530208a8ed51bc01f79e6", "&'()*+,-./01"},
{"$7z$0$19$0$1122$8$5bc4988c71cba8b70000000000000000$2815498089$112$99$0e4368dde66925e2bfac9a450291f8f817beaa891f08c4d2735d20b3147df581e2f3c53abfe2b0971186ac39280eb354ca5989f9043ad0288302d0ac59a3c8fa99d26c9619b81d22996f24eec1dba361afdd5e50060c2599a40a00c83c4ee0bc4ebe6e3126a64a743af95d9b22ee5867", "&'()*+,-./012"},
{"$7z$0$19$0$1122$8$33ab0ad513b7d6910000000000000000$107430285$112$99$f9f1195a4210eadc5b23f046f81c8cfaec3b90d8b6b67893f10bd9bedd0d859d0695bca5ce315cecbc2910dce27e4c1a1416675d841901c8d84846360b1919ebcba91143713c6b755758d3db64d39344da18222341818220cc43f3ee3a91cbc288f1aafe377b53def310d3b83d32aee3", "&'()*+,-./0123"},
{"$7z$0$19$0$1122$8$dd490a165c1b90f90000000000000000$2897354864$112$99$51efe41b67875503acebe2e199cb542a279520b468a61ba67b54612e317a84e95879a34eaad82124798f32c19f9c0786e8faaac768da5f6b2c91e3ba9f97a03a992c18b5b9b21a5f2b67ae9daeef37ec115f44bfb8b10ac3cb7862b6c024413a2ee801aa674df05e8b56bd8654f279f5", "&'()*+,-./01234"},
{"$7z$0$19$0$1122$8$9077cb191a5969b40000000000000000$3637063426$112$99$1e74746c59bdfe6b3f3d957493c9d5b92ba358f97e19d30e20443cb2fbac0501e07a162344ac7cf7cfa727c70a2bcf52593accc5c2c070c2331863ac76da5ad2f5de374292a87c6af67ab561f9cf71ae472ed1267d481c250f5b4d82d0ec0b2b8531db1fe4637c3f4e3a08de1b9b5418", "&'()*+,-./012345"},
{"$7z$0$19$0$1122$8$adc090d27b0343d30000000000000000$1147570982$112$99$ac14b9dc3751cfe6c1c719ceef3d73946fff2b0f924e06cd3177883df770e5505551bcf5598277801f46584a4f41530f50007c776d2bb91fd160148042275dfe4e420ff72244409f59c687a5bb2d0fc1bb29138689094fe40bb0f22785c63c631cd05abf4f7f3c9b6832e192e103d2f1", "&'()*+,-./0123456"},
{"$7z$0$19$0$1122$8$8dee69dc35517a2a0000000000000000$87427823$112$99$ea36cf8b577a0b5f31115f8550987f05f174b347a8a6433a08c013ecd816c8ecaad163c62db9bae6c57ace3c2a6ce0b36f78ad4723328cc022906400eed55e0e3685a5e8e6b369df780ee72f3d25ccd49d7f40d013052e080723dd4c0b1c75302c884ea956e3b6fd27261eb8c49dea51", "&'()*+,-./01234567"},
{"$7z$0$19$0$1122$8$200ce603d6f355f10000000000000000$3012105149$112$99$0ae42342f52172ad921178a25df3666e34e5a217d0afb3655088806f821d374bf522c197e59b131dbc574d4c936472f59f8892f69e47724ea52ecc5dc7d3ed734c557c9698a6f01519039714c065ad25008003c93cb7f694ee07267d5fcdebab5d149d5404023a0112faec2264d33ff6", "&'()*+,-./012345678"},
{"$7z$0$19$0$1122$8$a5007fc77fa5cc0b0000000000000000$1082728565$112$99$32c404c9633e9c61b76556e169695248008c51ca8f7f0f79c4a271ac6eb1d905a2622132f2f6988f9f3f5e375c592ec63d92d7b183b5801b149595ed440b23a083633de9f1cb5b6ac3238b7523b23141e686e6cbe9d4d3a28fc6489e902c17aeff6cd4cb516bef5cd5c6def78cb88ad4", "&'()*+,-./0123456789"},
{"$7z$0$19$0$1122$8$fd531c4e580be9a60000000000000000$1843420503$112$99$704289830b1add1c8ee6fd622ecf5b8da01988580bdb52f6269cc61c21838849d3a04299eaee15e0cae0eff9f6c3c82f71e434b3aa1c0ca824b90438c1c983130218acd128d9186e5dc2d19a8db602a0382cb60dadb4641b46fe532b799d29a4b882beaa9217f48ddccc99578617f8a0", "&'()*+,-./0123456789:"},
{"$7z$0$19$0$1122$8$7f94a95f71c1b0df0000000000000000$141406606$112$99$1a510a6fda9788b4f4b2274ea929044c00b61b23946bc417ead90ad64dcc9a55378f9ab74f7d693a5dcf455c00f82f6c2a885b664f4ab10c9969026714ce2773030f1c5872ca3948cd612e21b321826c2a561104d57a3ba2055f03aa9cc264821544ec4bccc41f4ac76aab97accb8f9c", "&'()*+,-./0123456789:;"},
{"$7z$0$19$0$1122$8$e24e93c7a9ebde080000000000000000$718561925$112$99$580bf36388526c932c22e3227b51774b6963a9c5b96fc8e2ac70a4302864fa88f50e7c00d9a79e0bca0f07a236e51200dc23435b7680e6fa99b19d790ac093af615a972f8b232686c21279234a2582f9714c5a1a2d326084158eba3e81b4f8ad40784d84baa8ddbed19f1c6603156d2c", "&'()*+,-./0123456789:;<"},
#if PLAINTEXT_LENGTH > 23
{"$7z$0$19$0$1122$8$6fbd519735b131710000000000000000$1248418560$112$99$cc9e3c97073d7fd37f04d4e6983b386e3ac00f6292dedb0f566dccf22cdbbb55fee8669edade383e96aa0a740e2b42aa7fddbe5831cac10828c624ee03a1a256c6e777c3d714c55296cb815c509a252b9426fe8d4566c944efe3fac5ea94910e55a390aef2c729a031e832c406049810", "&'()*+,-./0123456789:;<="},
{"$7z$0$19$0$1122$8$3ce1b899fc03d9c30000000000000000$1452122600$112$99$d4be60d5ab390713c7189f0dd808227c01f15f71fcf4bbccce6cb9238d6418c115eff59784d96ff8944575710a5799c7bcb761e8f1bfb7646a0e8fac3728ba4cca44fb82e5dd9f87bb26828566af64374b512fa094d35af8d743bded88b6257ec98a99b50dd225d4608b283bf035ac08", "&'()*+,-./0123456789:;<=>"},
{"$7z$0$19$0$1122$8$656e2285aabed25b0000000000000000$3885982465$112$99$77f2871e556e7f5278a9e896e91cd386ca8935128957d31fdce0603ea0e71c08b908a4c2d9f2d279757ced848be9482067c9d7935c88e5233aaa94a101d29908f7f015646758029d2078d25d0886bb9f0cdc0dd5136d72e90ceeea678564b199866dd8c9e5fe927102ee2dcf1cd4167f", "&'()*+,-./0123456789:;<=>?"},
{"$7z$0$19$0$1122$8$44ffefa48fa5a5b00000000000000000$1011653568$112$99$5d2504a1eb819218b9ad552e377d37e811ffccb64a554f404d982d209edfafb893b679cc881bbcbc606e67ffa055f712d7f140b554769511bc00321765830ea7c5db810fa2000ae7f4250b74aa61d881db66ae6f30e4c8e71887960c117b268d9934b8b5d52d4abdcb42b0e4ff40b805", "&'()*+,-./0123456789:;<=>?@"},
{"$7z$0$19$0$1122$8$b6e089dd0c52b6b80000000000000000$1229766981$112$99$49a8334d64d9cc7d710fe3b9c35f5d7cb0ec44d5db8a90966fbee93f85fdeeeca859c55519addb20c4628c9204dd24d1169b34dc53a2a685440fae7ed6748c172a8e9dcc42c8dffe60196818ad17a6f9314fcfd4d97cab3c18cf279df344e00fd04eaff32f29cbfcdb6832cfb69fe351", "&'()*+,-./0123456789:;<=>?@A"},
#endif /* PLAINTEXT_LENGTH > 23 */
#endif /* DEBUG */
{NULL}
};
static sevenzip_password *inbuffer;
static sevenzip_hash *outbuffer;
static sevenzip_salt currentsalt;
static cl_mem mem_in, mem_out, mem_salt;
static cl_kernel sevenzip_init, sevenzip_final;
#define insize (sizeof(sevenzip_password) * global_work_size)
#define outsize (sizeof(sevenzip_hash) * global_work_size)
#define statesize (sizeof(sevenzip_state) * global_work_size)
#define saltsize (sizeof(sevenzip_salt))
#define cracked_size (sizeof(*cracked) * global_work_size)
static struct fmt_main *self;
#define HASH_LOOPS 0x4000
#define LOOP_COUNT ((1 << currentsalt.iterations) / HASH_LOOPS)
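/* Total key-setup cost is 2^NumCyclesPower SHA-256 iterations; crypt_all()
   invokes the loop kernel LOOP_COUNT times at HASH_LOOPS (0x4000) iterations
   each, so events can be processed between kernel launches. */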
#define STEP 0
#define SEED 16
static int split_events[] = { 2, -1, -1 };
static const char *warn[] = {
"xfer: ", ", init: ", ", crypt: ", ", final: ", ", xfer: "
};
// This file contains auto-tuning routine(s). It has to be included after format definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
size_t s;
s = autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_init);
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel));
s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, sevenzip_final));
return s;
}
static void create_clobj(size_t global_work_size, struct fmt_main *self)
{
cl_int cl_error;
inbuffer = (sevenzip_password*) mem_calloc(1, insize);
outbuffer = (sevenzip_hash*) mem_alloc(outsize);
cracked = mem_calloc(1, cracked_size);
// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_salt =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, saltsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem salt");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(sevenzip_init, 0, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_salt),
&mem_salt), "Error while setting mem_salt kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 1, sizeof(mem_salt),
&mem_salt), "Error while setting mem_salt kernel argument");
HANDLE_CLERROR(clSetKernelArg(sevenzip_final, 2, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
}
static void release_clobj(void)
{
if (cracked) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(cracked);
}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(sevenzip_init), "Release kernel");
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseKernel(sevenzip_final), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void init(struct fmt_main *_self)
{
CRC32_t crc;
self = _self;
opencl_prepare_dev(gpu_id);
CRC32_Init(&crc);
if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
cl_int cl_error;
snprintf(build_opts, sizeof(build_opts),
"-DPLAINTEXT_LENGTH=%d -DHASH_LOOPS=%d",
PLAINTEXT_LENGTH, HASH_LOOPS);
opencl_init("$JOHN/kernels/7z_kernel.cl",
gpu_id, build_opts);
sevenzip_init = clCreateKernel(program[gpu_id], "sevenzip_init",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
crypt_kernel = clCreateKernel(program[gpu_id], "sevenzip_loop",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
sevenzip_final = clCreateKernel(program[gpu_id], "sevenzip_final",
&cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, HASH_LOOPS, split_events,
warn, 2, self,
create_clobj, release_clobj,
sizeof(sevenzip_state), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1 << 19, 0, 15000000000ULL);
}
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len, NumCyclesPower;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL)
goto err;
if (strlen(p) > 1 || '0' != *p) /* p must be "0" */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* NumCyclesPower */
goto err;
if (strlen(p) > 2)
goto err;
if (!isdec(p))
goto err;
NumCyclesPower = atoi(p);
if (NumCyclesPower > 24 || NumCyclesPower < 1)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt length */
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > 16) /* salt length */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv length */
goto err;
if (strlen(p) > 2)
goto err;
if (!isdec(p))
goto err;
len = atoi(p);
if (len > 16) /* iv length */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iv */
goto err;
if (!ishexlc(p))
goto err;
if (strlen(p) / 2 > len && strcmp(p+len*2, "0000000000000000"))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* crc */
goto err;
if (!isdecu(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* data length */
goto err;
if(!isdec(p))
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "$")) == NULL) /* unpacksize */
goto err;
if (!isdec(p)) /* no way to validate, other than atoi() works for it */
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* data */
goto err;
if (strlen(p) / 2 != len) /* validates data_len atoi() */
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
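/*
 * Ciphertext layout accepted above, fields in strtokm() order (the same
 * order in which get_salt() consumes them below):
 *   $7z$ type "0" $ NumCyclesPower (1..24) $ salt length (<= 16) $ salt
 *   $ iv length (<= 16) $ iv (hex) $ crc $ data length $ unpacksize
 *   $ data (hex, data-length bytes)
 */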
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static union {
struct custom_salt _cs;
ARCH_WORD_32 dummy;
} un;
struct custom_salt *cs = &(un._cs);
memset(cs, 0, SALT_SIZE);
ctcopy += 4;
p = strtokm(ctcopy, "$");
cs->type = atoi(p);
p = strtokm(NULL, "$");
cs->NumCyclesPower = atoi(p);
p = strtokm(NULL, "$");
cs->SaltSize = atoi(p);
p = strtokm(NULL, "$"); /* salt */
p = strtokm(NULL, "$");
cs->ivSize = atoi(p);
p = strtokm(NULL, "$"); /* iv */
for (i = 0; i < cs->ivSize; i++)
cs->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$"); /* crc */
cs->crc = atou(p); /* unsigned function */
p = strtokm(NULL, "$");
cs->length = atoi(p);
p = strtokm(NULL, "$");
cs->unpacksize = atoi(p);
p = strtokm(NULL, "$"); /* data */
for (i = 0; i < cs->length; i++)
cs->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->SaltSize);
if (currentsalt.iterations != cur_salt->NumCyclesPower)
new_keys = 1;
currentsalt.length = cur_salt->SaltSize;
currentsalt.iterations = cur_salt->NumCyclesPower;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt,
CL_FALSE, 0, saltsize, &currentsalt, 0, NULL, NULL),
"Transfer salt to gpu");
}
static void clear_keys(void)
{
memset(inbuffer, 0, insize);
}
static void sevenzip_set_key(char *key, int index)
{
UTF16 c_key[PLAINTEXT_LENGTH + 1];
int length = strlen(key);
/* Convert password to utf-16-le format (--encoding aware) */
length = enc_to_utf16(c_key, PLAINTEXT_LENGTH,
(UTF8*)key, length);
if (length <= 0)
length = strlen16(c_key);
length *= 2;
inbuffer[index].length = length;
memcpy(inbuffer[index].v, c_key, length);
new_keys = 1;
}
static char *get_key(int index)
{
UTF16 c_key[PLAINTEXT_LENGTH + 1];
int length = inbuffer[index].length;
memcpy(c_key, inbuffer[index].v, length);
c_key[length / 2] = 0;
return (char*)utf16_to_enc(c_key);
}
static int salt_compare(const void *x, const void *y)
{
int c;
const struct custom_salt *s1 = x;
const struct custom_salt *s2 = y;
// we had to make the salt order deterministic, so that intersalt-restore works
if (s1->NumCyclesPower != s2->NumCyclesPower)
return (s1->NumCyclesPower - s2->NumCyclesPower);
c = memcmp(s1->salt, s2->salt, 16);
if (c) return c;
return memcmp(s1->iv, s2->iv, 16);
}
// XXX port Python code to C *OR* use code from LZMA SDK
static int validFolder(unsigned char *data)
{
// int numcoders = self._read64Bit(file)
return 0;
}
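/*
 * sevenzip_decrypt() below verifies a candidate key in up to three stages:
 * test 0: the padding bytes past unpacksize must all be zero (rejects on a
 *         nonzero byte; accepts outright when the margin exceeds 7 bytes),
 * test 1: CRC32 over the first unpacksize decrypted bytes must match the
 *         stored crc,
 * test 2: a "well-formed folder" structural check, not yet implemented.
 */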
static int sevenzip_decrypt(unsigned char *derived_key, unsigned char *data)
{
unsigned char out[cur_salt->length];
AES_KEY akey;
unsigned char iv[16];
union {
unsigned char crcc[4];
unsigned int crci;
} _crc_out;
unsigned char *crc_out = _crc_out.crcc;
unsigned int ccrc;
CRC32_t crc;
int i;
int nbytes, margin;
memcpy(iv, cur_salt->iv, 16);
if(AES_set_decrypt_key(derived_key, 256, &akey) < 0) {
fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
}
AES_cbc_encrypt(cur_salt->data, out, cur_salt->length, &akey, iv, AES_DECRYPT);
/* various verification tests */
// test 0, padding check, bad hack :-(
margin = nbytes = cur_salt->length - cur_salt->unpacksize;
i = cur_salt->length - 1;
while (nbytes > 0) {
if (out[i] != 0)
return -1;
nbytes--;
i--;
}
if (margin > 7) {
// printf("valid padding test ;-)\n");
// print_hex(out, cur_salt->length);
return 0;
}
// test 1, CRC test
CRC32_Init(&crc);
CRC32_Update(&crc, out, cur_salt->unpacksize);
CRC32_Final(crc_out, crc);
ccrc = _crc_out.crci; // computed CRC
if (ccrc == cur_salt->crc)
return 0; // XXX don't be too eager!
// XXX test 2, "well-formed folder" test
if (validFolder(out)) {
printf("validFolder check ;-)\n");
return 0;
}
return -1;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
//fprintf(stderr, "%s(%d) lws %zu gws %zu\n", __FUNCTION__, count, local_work_size, global_work_size);
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
if (ocl_autotune_running || new_keys) {
int i;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
// Run 1st kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_init, 1,
NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]),
"Run init kernel");
// Run loop kernel
for (i = 0; i < (ocl_autotune_running ? 1 : LOOP_COUNT); i++) {
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id],
crypt_kernel, 1, NULL, &global_work_size, lws, 0,
NULL, multi_profilingEvent[2]),
"Run loop kernel");
BENCH_CLERROR(clFinish(queue[gpu_id]),
"Error running loop kernel");
opencl_process_event();
}
// Run final kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], sevenzip_final, 1,
NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]),
"Run final kernel");
// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[4]),
"Copy result back");
}
new_keys = 0;
if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
/* decrypt and check */
if(sevenzip_decrypt(outbuffer[index].key, cur_salt->data) == 0)
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int)(1 << my_salt->NumCyclesPower);
}
struct fmt_main fmt_opencl_sevenzip = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT | FMT_UNICODE | FMT_UTF8,
{
"iteration count",
},
sevenzip_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
salt_compare,
set_salt,
sevenzip_set_key,
get_key,
clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
p_kernel.c |
/*
Author: Mohammed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
*/
#include <petscmat.h>
#include <petscsnes.h>
#include <petscvec.h>
#include <omp.h>
#include "inc/geometry.h"
#include "inc/ktime.h"
#include "inc/ker/kernel.h"
#include "inc/ker/phy.h"
/*
Evaluate Function F(x): Functional form used to convey the
nonlinear function to be solved by PETSc SNES
*/
int
ffunc(SNES snes, Vec x, Vec f, void *restrict ctx)
{
struct ctx *restrict c = (struct ctx *) ctx;
const size_t nnodes = c->g->n->sz;
const size_t bsz = c->g->c->bsz;
int ierr;
const double *restrict q;
ierr = VecGetArrayRead(x, (const PetscScalar **) &q);
CHKERRQ(ierr);
struct grad grad;
{
grad.bsz = c->g->c->bsz;
grad.dofs = c->g->c->sz;
grad.ie = c->g->s->ie;
grad.part = c->g->s->part;
grad.n0 = c->g->e->eptr->n0;
grad.n1 = c->g->e->eptr->n1;
grad.w0termsx = c->g->e->w->w0->x0;
grad.w0termsy = c->g->e->w->w0->x1;
grad.w0termsz = c->g->e->w->w0->x2;
grad.w1termsx = c->g->e->w->w1->x0;
grad.w1termsy = c->g->e->w->w1->x1;
grad.w1termsz = c->g->e->w->w1->x2;
grad.q = q;
grad.gradx0 = c->grad->x0;
grad.gradx1 = c->grad->x1;
grad.gradx2 = c->grad->x2;
grad.t = &c->t->grad;
}
compute_grad(&grad);
double *restrict r;
ierr = VecGetArray(f, (PetscScalar **) &r);
CHKERRQ(ierr);
struct flux flux;
{
flux.bsz = c->g->c->bsz;
flux.nfnodes = c->g->b->f->n->sz;
flux.dofs = c->g->c->sz;
flux.snfc = c->g->s->snfc;
flux.pressure = c->iv->p;
flux.velocity_u = c->iv->u;
flux.velocity_v = c->iv->v;
flux.velocity_w = c->iv->w;
flux.f_xyz0 = c->g->b->f->n->xyz->x0;
flux.f_xyz1 = c->g->b->f->n->xyz->x1;
flux.f_xyz2 = c->g->b->f->n->xyz->x2;
flux.xyz0 = c->g->n->xyz->x0;
flux.xyz1 = c->g->n->xyz->x1;
flux.xyz2 = c->g->n->xyz->x2;
flux.ie = c->g->s->ie;
flux.part = c->g->s->part;
flux.snfic = c->g->s->snfic;
flux.n0 = c->g->e->eptr->n0;
flux.n1 = c->g->e->eptr->n1;
flux.nfptr = c->g->b->f->n->nptr;
flux.sn0 = c->g->b->snfptr->n0;
flux.sn1 = c->g->b->snfptr->n1;
flux.sn2 = c->g->b->snfptr->n2;
flux.x0 = c->g->e->xyzn->x0;
flux.x1 = c->g->e->xyzn->x1;
flux.x2 = c->g->e->xyzn->x2;
flux.x3 = c->g->e->xyzn->x3;
flux.q = q;
flux.gradx0 = c->grad->x0;
flux.gradx1 = c->grad->x1;
flux.gradx2 = c->grad->x2;
flux.r = r;
flux.t = &c->t->flux;
}
compute_flux(&flux);
struct ktime ktime;
setktime(&ktime);
const double *restrict q_;
ierr = VecGetArrayRead(c->ts->q, (const PetscScalar **) &q_);
CHKERRQ(ierr);
const double *restrict area = c->g->n->area;
double *restrict cdt = c->ts->cdt;
const double cfl = c->ts->cfl;
uint32_t i;
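/*
  Add the pseudo-time-stepping contribution to the residual: for each node,
  r += (area / (cfl * cdt)) * (q - q_) on all four conserved variables,
  where q_ is the solution at the previous pseudo-time step.
*/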
#pragma omp parallel for
for(i = 0; i < nnodes; i++)
{
const double t = area[i] / (cfl * cdt[i]);
const uint32_t idx = bsz * i;
r[idx + 0] += t * (q[idx + 0] - q_[idx + 0]);
r[idx + 1] += t * (q[idx + 1] - q_[idx + 1]);
r[idx + 2] += t * (q[idx + 2] - q_[idx + 2]);
r[idx + 3] += t * (q[idx + 3] - q_[idx + 3]);
}
ierr = VecRestoreArrayRead(c->ts->q, (const PetscScalar **) &q_);
CHKERRQ(ierr);
compute_time(&ktime, &c->t->tstep_contr);
ierr = VecRestoreArray(f, (PetscScalar **) &r);
CHKERRQ(ierr);
ierr = VecRestoreArrayRead(x, (const PetscScalar **) &q);
CHKERRQ(ierr);
return 0;
}
/*
  Evaluate Jacobian F'(x): functional form used to convey the
  nonlinear Jacobian of the function to be solved by PETSc SNES.
  Arguments: the input vector; the matrix that defines the approximate
  Jacobian; the matrix used to construct the preconditioner; and the
  user-defined context.
*/
int
jfunc(SNES snes, Vec x, Mat Amat, Mat Pmat, void *restrict ctx)
{
struct ctx *restrict c = (struct ctx *) ctx;
/*
Resets a factored matrix to be treated as unfactored
*/
int ierr;
ierr = MatSetUnfactored(Pmat);
CHKERRQ(ierr);
const double *restrict q;
ierr = VecGetArrayRead(x, (const PetscScalar **) &q);
CHKERRQ(ierr);
/*
Fill the nonzero term of the A matrix
*/
struct fill fill;
{
fill.q = q;
fill.g = c->g;
fill.ts = c->ts;
fill.iv = c->iv;
fill.A = Pmat;
fill.t = c->t;
}
ierr = fill_mat(&fill);
CHKERRQ(ierr);
ierr = VecRestoreArrayRead(x, (const PetscScalar **) &q);
CHKERRQ(ierr);
ierr = MatAssemblyBegin(Amat, MAT_FINAL_ASSEMBLY);
CHKERRQ(ierr);
ierr = MatAssemblyEnd(Amat, MAT_FINAL_ASSEMBLY);
CHKERRQ(ierr);
return 0;
}
|
par_mgr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Two-grid system solver
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"
#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif
/* Create */
void *
hypre_MGRCreate()
{
hypre_ParMGRData *mgr_data;
mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);
/* block data */
(mgr_data -> block_size) = 1;
(mgr_data -> block_num_coarse_indexes) = NULL;
(mgr_data -> point_marker_array) = NULL;
(mgr_data -> block_cf_marker) = NULL;
/* general data */
(mgr_data -> max_num_coarse_levels) = 10;
(mgr_data -> A_array) = NULL;
(mgr_data -> P_array) = NULL;
(mgr_data -> RT_array) = NULL;
(mgr_data -> RAP) = NULL;
(mgr_data -> CF_marker_array) = NULL;
(mgr_data -> coarse_indices_lvls) = NULL;
(mgr_data -> A_ff_array) = NULL;
(mgr_data -> F_fine_array) = NULL;
(mgr_data -> U_fine_array) = NULL;
(mgr_data -> aff_solver) = NULL;
(mgr_data -> fine_grid_solver_setup) = NULL;
(mgr_data -> fine_grid_solver_solve) = NULL;
(mgr_data -> F_array) = NULL;
(mgr_data -> U_array) = NULL;
(mgr_data -> residual) = NULL;
(mgr_data -> rel_res_norms) = NULL;
(mgr_data -> Vtemp) = NULL;
(mgr_data -> Ztemp) = NULL;
(mgr_data -> Utemp) = NULL;
(mgr_data -> Ftemp) = NULL;
(mgr_data -> num_iterations) = 0;
(mgr_data -> num_interp_sweeps) = 1;
(mgr_data -> num_restrict_sweeps) = 1;
(mgr_data -> trunc_factor) = 0.0;
(mgr_data -> max_row_sum) = 0.9;
(mgr_data -> strong_threshold) = 0.25;
(mgr_data -> P_max_elmts) = 0;
(mgr_data -> coarse_grid_solver) = NULL;
(mgr_data -> coarse_grid_solver_setup) = NULL;
(mgr_data -> coarse_grid_solver_solve) = NULL;
(mgr_data -> global_smoother) = NULL;
(mgr_data -> use_default_cgrid_solver) = 1;
(mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used
(mgr_data -> omega) = 1.;
(mgr_data -> max_iter) = 20;
(mgr_data -> tol) = 1.0e-6;
(mgr_data -> relax_type) = 0;
(mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
(mgr_data -> interp_type) = NULL;
(mgr_data -> restrict_type) = NULL;
(mgr_data -> num_relax_sweeps) = 1;
(mgr_data -> relax_weight) = 1.0;
(mgr_data -> logging) = 0;
(mgr_data -> print_level) = 0;
(mgr_data -> frelax_print_level) = 0;
(mgr_data -> cg_print_level) = 0;
(mgr_data -> l1_norms) = NULL;
(mgr_data -> reserved_coarse_size) = 0;
(mgr_data -> reserved_coarse_indexes) = NULL;
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
(mgr_data -> diaginv) = NULL;
(mgr_data -> global_smooth_iters) = 1;
(mgr_data -> global_smooth_type) = 0;
(mgr_data -> set_non_Cpoints_to_F) = 0;
(mgr_data -> idx_array) = NULL;
(mgr_data -> Frelax_method) = NULL;
(mgr_data -> VcycleRelaxVtemp) = NULL;
(mgr_data -> VcycleRelaxZtemp) = NULL;
(mgr_data -> FrelaxVcycleData) = NULL;
(mgr_data -> Frelax_num_functions) = NULL;
(mgr_data -> max_local_lvls) = 10;
(mgr_data -> use_non_galerkin_cg) = NULL;
(mgr_data -> print_coarse_system) = 0;
(mgr_data -> set_c_points_method) = 0;
(mgr_data -> lvl_to_keep_cpoints) = 0;
(mgr_data -> cg_convergence_factor) = 0.0;
(mgr_data -> truncate_coarse_grid_threshold) = 0.0;
return (void *) mgr_data;
}
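/* A minimal usage sketch (illustrative, not part of this file): the solver
 * object created above is driven through the public HYPRE_MGR* wrappers;
 * option setters and the setup/solve entry points are assumed from that
 * interface and are not defined in this excerpt. */
#if 0
   void *mgr = hypre_MGRCreate();
   /* ... set coarsening/relaxation options, run setup/solve on mgr ... */
   hypre_MGRDestroy(mgr);
#endif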
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy */
HYPRE_Int
hypre_MGRDestroy( void *data )
{
hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;
HYPRE_Int i;
HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);
/* block info data */
if ((mgr_data -> block_cf_marker))
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
}
}
hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if(mgr_data -> block_num_coarse_indexes)
{
hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* final residual vector */
if((mgr_data -> residual))
{
hypre_ParVectorDestroy( (mgr_data -> residual) );
(mgr_data -> residual) = NULL;
}
if((mgr_data -> rel_res_norms))
{
hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST);
(mgr_data -> rel_res_norms) = NULL;
}
/* temp vectors for solve phase */
if((mgr_data -> Vtemp))
{
hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
(mgr_data -> Vtemp) = NULL;
}
if((mgr_data -> Ztemp))
{
hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
(mgr_data -> Ztemp) = NULL;
}
if((mgr_data -> Utemp))
{
hypre_ParVectorDestroy( (mgr_data -> Utemp) );
(mgr_data -> Utemp) = NULL;
}
if((mgr_data -> Ftemp))
{
hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
(mgr_data -> Ftemp) = NULL;
}
/* coarse grid solver */
if((mgr_data -> use_default_cgrid_solver))
{
if((mgr_data -> coarse_grid_solver))
{
hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
}
(mgr_data -> coarse_grid_solver) = NULL;
}
/* l1_norms */
if ((mgr_data -> l1_norms))
{
for (i=0; i < (num_coarse_levels); i++)
{
hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]);
}
hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
}
/* coarse_indices_lvls */
if ((mgr_data -> coarse_indices_lvls))
{
for (i=0; i < (num_coarse_levels); i++)
if ((mgr_data -> coarse_indices_lvls)[i])
hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
}
/* linear system and cf marker array */
if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array)
{
for (i=1; i < num_coarse_levels+1; i++) {
hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
hypre_ParVectorDestroy((mgr_data -> U_array)[i]);
if ((mgr_data -> P_array)[i-1])
hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]);
if ((mgr_data -> RT_array)[i-1])
hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]);
hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST);
}
for (i=1; i < (num_coarse_levels); i++) {
if ((mgr_data -> A_array)[i])
hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
}
}
/* AMG for Frelax */
if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array)
{
for (i=1; i < num_coarse_levels+1; i++)
{
if (mgr_data -> F_fine_array[i])
hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]);
if (mgr_data -> U_fine_array[i])
hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]);
}
for (i=1; i < (num_coarse_levels); i++)
{
if ((mgr_data -> A_ff_array)[i])
hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]);
}
if (mgr_data -> use_default_fsolver)
{
hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]);
}
hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST);
(mgr_data -> F_fine_array) = NULL;
hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST);
(mgr_data -> U_fine_array) = NULL;
hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST);
(mgr_data -> A_ff_array) = NULL;
}
if(mgr_data -> aff_solver)
{
for (i = 1; i < (num_coarse_levels); i++) {
if ((mgr_data -> aff_solver)[i])
hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]);
}
if (mgr_data -> use_default_fsolver)
{
if ((mgr_data -> aff_solver)[0])
hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]);
}
hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST);
(mgr_data -> aff_solver) = NULL;
}
if((mgr_data -> F_array))
{
hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
(mgr_data -> F_array) = NULL;
}
if((mgr_data -> U_array))
{
hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
(mgr_data -> U_array) = NULL;
}
if((mgr_data -> A_array))
{
hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
(mgr_data -> A_array) = NULL;
}
if((mgr_data -> P_array))
{
hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
(mgr_data -> P_array) = NULL;
}
if((mgr_data -> RT_array))
{
hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
(mgr_data -> RT_array) = NULL;
}
if((mgr_data -> CF_marker_array))
{
hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
(mgr_data -> CF_marker_array) = NULL;
}
if((mgr_data -> reserved_Cpoint_local_indexes))
{
hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
}
if (mgr_data -> restrict_type)
{
hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST);
(mgr_data -> restrict_type) = NULL;
}
if (mgr_data -> interp_type)
{
hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST);
(mgr_data -> interp_type) = NULL;
}
/* Frelax_method */
if (mgr_data -> Frelax_method)
{
hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_method) = NULL;
}
/* Frelax_num_functions */
if (mgr_data -> Frelax_num_functions)
{
hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_num_functions) = NULL;
}
/* data for V-cycle F-relaxation */
if((mgr_data -> VcycleRelaxVtemp))
{
hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) );
(mgr_data -> VcycleRelaxVtemp) = NULL;
}
if((mgr_data -> VcycleRelaxZtemp))
{
hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) );
(mgr_data -> VcycleRelaxZtemp) = NULL;
}
if (mgr_data -> FrelaxVcycleData) {
for (i = 0; i < num_coarse_levels; i++) {
if ((mgr_data -> FrelaxVcycleData)[i]) {
hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
(mgr_data -> FrelaxVcycleData)[i] = NULL;
}
}
hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
(mgr_data -> FrelaxVcycleData) = NULL;
}
/* data for reserved coarse nodes */
if(mgr_data -> reserved_coarse_indexes)
{
hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
(mgr_data -> reserved_coarse_indexes) = NULL;
}
/* index array for setting Cpoints by global block */
if ((mgr_data -> set_c_points_method) == 1)
{
hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
(mgr_data -> idx_array) = NULL;
}
/* array for setting option to use non-Galerkin coarse grid */
if (mgr_data -> use_non_galerkin_cg)
{
hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
(mgr_data -> use_non_galerkin_cg) = NULL;
}
/* coarse level matrix - RAP */
if ((mgr_data -> RAP))
hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
if ((mgr_data -> diaginv))
hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
if ((mgr_data -> global_smoother))
{
if (mgr_data -> global_smooth_type == 8)
{
HYPRE_EuclidDestroy((mgr_data -> global_smoother));
}
else if (mgr_data -> global_smooth_type == 16)
{
HYPRE_ILUDestroy((mgr_data -> global_smoother));
}
}
/* mgr data */
hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Create data for V-cycle F-relaxation */
void *
hypre_MGRCreateFrelaxVcycleData()
{
hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);
hypre_ParAMGDataAArray(vdata) = NULL;
hypre_ParAMGDataPArray(vdata) = NULL;
hypre_ParAMGDataFArray(vdata) = NULL;
hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
hypre_ParAMGDataVtemp(vdata) = NULL;
hypre_ParAMGDataAMat(vdata) = NULL;
hypre_ParAMGDataBVec(vdata) = NULL;
hypre_ParAMGDataZtemp(vdata) = NULL;
hypre_ParAMGDataCommInfo(vdata) = NULL;
hypre_ParAMGDataUArray(vdata) = NULL;
hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
hypre_ParAMGDataNumLevels(vdata) = 0;
hypre_ParAMGDataMaxLevels(vdata) = 10;
hypre_ParAMGDataNumFunctions(vdata) = 1;
hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
hypre_ParAMGDataRelaxOrder(vdata) = 1;
hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
hypre_ParAMGDataMinCoarseSize(vdata) = 0;
hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;
return (void *) vdata;
}
/* Destroy data for V-cycle F-relaxation */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
HYPRE_Int i;
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
for (i=1; i < num_levels + 1; i++)
{
if (hypre_ParAMGDataAArray(vdata)[i])
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
if (hypre_ParAMGDataPArray(vdata)[i-1])
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST);
hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
}
/* see comments in par_coarsen.c regarding special case for CF_marker */
if (num_levels <= 1)
{
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST);
}
/* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
//hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
//hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST);
/* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */
/*
if (hypre_ParAMGDataZtemp(vdata))
hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
*/
if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);
if (new_comm != hypre_MPI_COMM_NULL)
{
hypre_MPI_Comm_free (&new_comm);
}
hypre_TFree(vdata, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Set C-point variables for each reduction level */
/* Currently not implemented */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
HYPRE_Int nlevels,
HYPRE_Int *num_coarse_points,
HYPRE_Int **level_coarse_indexes)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_coarse_levels) = nlevels;
(mgr_data -> num_coarse_per_level) = num_coarse_points;
(mgr_data -> level_coarse_indexes) = level_coarse_indexes;
return hypre_error_flag;
}
/* Set whether non-coarse points on each level should be explicitly tagged as F-points */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
return hypre_error_flag;
}
/* Set whether the reserved C points are reduced before the coarse grid solve */
HYPRE_Int
hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> lvl_to_keep_cpoints) = level;
return hypre_error_flag;
}
/* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... */
HYPRE_Int
hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_BigInt *begin_idx_array,
HYPRE_Int *block_num_coarse_points,
HYPRE_Int **block_coarse_indexes)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
if((mgr_data -> idx_array) != NULL) {
hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
(mgr_data -> idx_array) = NULL;
}
HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST);
if (begin_idx_array != NULL)
{
for (i = 0; i < block_size; i++) {
index_array[i] = begin_idx_array[i];
}
}
hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes);
(mgr_data -> idx_array) = index_array;
(mgr_data -> set_c_points_method) = 1;
return hypre_error_flag;
}
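/*--------------------------------------------------------------------------
* Example (hypothetical sizes): for a contiguous layout p1..pn, s1..sn with
* block_size = 2, begin_idx_array holds the first global index of each
* variable block. Keeping the p-variable coarse on one reduction level:
*
*   HYPRE_BigInt begin_idx[] = {0, n};  // n = global number of p-unknowns
*   HYPRE_Int lvl0_cpts[] = {0};
*   HYPRE_Int *cpts[] = {lvl0_cpts};
*   HYPRE_Int ncpts[] = {1};
*   hypre_MGRSetCpointsByContiguousBlock(mgr, 2, 1, begin_idx, ncpts, cpts);
*--------------------------------------------------------------------------*/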
/* Initialize/set local block data information */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_Int *block_num_coarse_points,
HYPRE_Int **block_coarse_indexes)
{
HYPRE_Int i,j;
HYPRE_Int **block_cf_marker = NULL;
HYPRE_Int *block_num_coarse_indexes = NULL;
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
/* free block cf_marker data if not previously destroyed */
if((mgr_data -> block_cf_marker) != NULL)
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker)[i] = NULL;
}
}
hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if((mgr_data -> block_num_coarse_indexes))
{
hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* store block cf_marker */
block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_levels; i++)
{
block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); /* bytewise memset is valid since FMRK == -1 */
}
for (i = 0; i < max_num_levels; i++)
{
for(j=0; j<block_num_coarse_points[i]; j++)
{
(block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
}
}
/* store block_num_coarse_points */
if(max_num_levels > 0)
{
block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
for(i=0; i<max_num_levels; i++)
block_num_coarse_indexes[i] = block_num_coarse_points[i];
}
/* set block data */
(mgr_data -> max_num_coarse_levels) = max_num_levels;
(mgr_data -> block_size) = block_size;
(mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
(mgr_data -> block_cf_marker) = block_cf_marker;
(mgr_data -> set_c_points_method) = 0;
return hypre_error_flag;
}
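/*--------------------------------------------------------------------------
* Illustration of the resulting markers: with block_size = 3 and two
* reduction levels, keeping variables {0,2} coarse on level 0 and {0} on
* level 1 gives
*   block_cf_marker[0] = {CMRK, FMRK, CMRK}
*   block_cf_marker[1] = {CMRK, FMRK, FMRK}
* so each level eliminates the block variables left marked FMRK.
*--------------------------------------------------------------------------*/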
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_Int *lvl_num_coarse_points,
HYPRE_Int **lvl_coarse_indexes,
HYPRE_Int *point_marker_array)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i,j;
HYPRE_Int **block_cf_marker = NULL;
HYPRE_Int *block_num_coarse_indexes = NULL;
/* free block cf_marker data if not previously destroyed */
if((mgr_data -> block_cf_marker) != NULL)
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker)[i] = NULL;
}
}
hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if((mgr_data -> block_num_coarse_indexes))
{
hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* store block cf_marker */
block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_levels; i++)
{
block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int));
}
for (i = 0; i < max_num_levels; i++)
{
for(j=0; j<lvl_num_coarse_points[i]; j++)
{
block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
}
}
/* store block_num_coarse_points */
if(max_num_levels > 0)
{
block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
for(i=0; i<max_num_levels; i++)
block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
}
/* set block data */
(mgr_data -> max_num_coarse_levels) = max_num_levels;
(mgr_data -> block_size) = block_size;
(mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
(mgr_data -> block_cf_marker) = block_cf_marker;
(mgr_data -> point_marker_array) = point_marker_array;
(mgr_data -> set_c_points_method) = 2;
return hypre_error_flag;
}
/* Set the number of points that remain part of the coarse grid throughout the hierarchy */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata,
HYPRE_Int reserved_coarse_size,
HYPRE_BigInt *reserved_cpt_index)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_BigInt *reserved_coarse_indexes = NULL;
HYPRE_Int i;
if (!mgr_data)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n");
return hypre_error_flag;
}
if(reserved_coarse_size < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/* free data not previously destroyed */
if((mgr_data -> reserved_coarse_indexes))
{
hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> reserved_coarse_indexes) = NULL;
}
/* set reserved coarse nodes */
if(reserved_coarse_size > 0)
{
reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
for(i=0; i<reserved_coarse_size; i++)
reserved_coarse_indexes[i] = reserved_cpt_index[i];
}
(mgr_data -> reserved_coarse_size) = reserved_coarse_size;
(mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;
return hypre_error_flag;
}
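/*--------------------------------------------------------------------------
* Usage sketch (hypothetical indices): reserve two global unknowns, e.g.
* well equations, so they remain C-points on every level down to the
* coarsest grid:
*
*   HYPRE_BigInt reserved[] = {gidx_w1, gidx_w2}; // assumed global indices
*   hypre_MGRSetReservedCoarseNodes(mgr, 2, reserved);
*--------------------------------------------------------------------------*/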
/* Compute the CF marker array (one level of coarsening, honoring the fixed coarse set) */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int fixed_coarse_size,
HYPRE_Int *fixed_coarse_indexes,
HYPRE_Int debug_flag,
HYPRE_Int **CF_marker_ptr,
HYPRE_Int cflag)
{
HYPRE_Int *CF_marker = NULL;
HYPRE_Int *cindexes = fixed_coarse_indexes;
HYPRE_Int i, row, nc;
HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* If this is the last level, coarsen onto fixed coarse set */
if(cflag)
{
if(*CF_marker_ptr != NULL)
{
hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST);
}
CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST);
memset(CF_marker, FMRK, nloc*sizeof(HYPRE_Int));
/* first mark fixed coarse set */
nc = fixed_coarse_size;
for(i = 0; i < nc; i++)
{
CF_marker[cindexes[i]] = CMRK;
}
}
else
{
/* First coarsen to get initial CF splitting.
* This is then followed by updating the CF marker to pass
* coarse information to the next levels. NOTE: It may be
* convenient to implement this way (allows the use of multiple
* coarsening strategies without changing too much code),
* but not necessarily the best option, compared to initializing
* CF_marker first and then coarsening on subgraph which excludes
* the initialized coarse nodes.
*/
hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker);
/* Update CF_marker to correct Cpoints marked as Fpoints. */
nc = fixed_coarse_size;
for(i = 0; i < nc; i++)
{
CF_marker[cindexes[i]] = CMRK;
}
/* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
* between type of F-points (example Ruge coarsening). We do not need that distinction here.
*/
for (row = 0; row <nloc; row++)
{
if(CF_marker[row] == CMRK) continue;
CF_marker[row] = FMRK;
}
#if 0
/* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
* in the next level.
*/
nc = 0;
index_i = 0;
for (row = 0; row <nloc; row++)
{
/* loop through new c-points */
if(CF_marker[row] == CMRK) nc++;
else if(CF_marker[row] == S_CMRK)
{
/* previously marked c-point is part of fixed coarse set. Track its current local index */
cindexes[index_i++] = nc;
/* reset c-point from S_CMRK to CMRK */
CF_marker[row] = CMRK;
nc++;
}
/* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
* between type of F-points (example Ruge coarsening). We do not need that distinction here.
*/
else
{
CF_marker[row] = FMRK;
}
}
/* check if this should be last level */
if( nc == fixed_coarse_size)
last_level = 1;
//printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
}
/* set CF_marker */
*CF_marker_ptr = CF_marker;
return hypre_error_flag;
}
/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
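/*--------------------------------------------------------------------------
* As implemented below, P has the block form (F rows over C rows)
*
*     P = [ W ]   with  W = 0                       (method 0, injection)
*         [ I ]         W = -A_fc                   (method 1)
*                       W = -diag(A_ff)^{-1} A_fc   (method 2, Jacobi)
*
* i.e. C-points interpolate by identity and F-points by a (diagonal)
* approximation of the ideal interpolation -A_ff^{-1} A_fc.
*--------------------------------------------------------------------------*/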
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int method,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
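/* Worked example of the partition below: n_fine = 10, num_threads = 3 gives
* size = 3, rest = 1, and the row ranges [0,4), [4,7), [7,10). */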
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation approximates -A_{ff}^{-1} A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ((CF_marker[i1] >= 0) && (method > 0))
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/* index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
big_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] < 0)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ( i==i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0/A_diag_data[jj];
}
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and compute the interpolation weight.
*--------------------------------------------------------------*/
if ((CF_marker[i1] >= 0) && (method > 0))
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
/*
if(method == 0)
{
P_diag_data[jj_counter] = 0.0;
}
*/
if (method == 1)
{
P_diag_data[jj_counter] = - A_diag_data[jj];
}
else if (method == 2)
{
P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
}
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and compute the interpolation weight.
*-----------------------------------------------------------*/
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
/*
if(method == 0)
{
P_offd_data[jj_counter_offd] = 0.0;
}
*/
if (method == 1)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj];
}
else if (method == 2)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
}
jj_counter_offd++;
}
}
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return(0);
}
/* Interpolation for MGR - Dynamic Row Sum method */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation approximates -A_{ff}^{-1} A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
/*--------------------------------------------------------------------
* Set up the indexes for the DRS method
*--------------------------------------------------------------------*/
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ( i==i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0/A_diag_data[jj];
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and compute the interpolation weight.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and compute the interpolation weight.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
jj_counter_offd++;
}
}
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
// hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return(0);
}
/* Left-scale a ParCSR matrix: A = diag(vector) * A
* vector: array of row scaling factors (length = local number of rows of A)
* A: the target ParCSR matrix, scaled in place
*/
HYPRE_Int
hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector,
hypre_ParCSRMatrix *A)
{
HYPRE_Int i, j, n_local;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
n_local = hypre_CSRMatrixNumRows(A_diag);
for (i = 0; i < n_local; i++)
{
HYPRE_Real factor = vector[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
A_diag_data[j] *= factor;
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
A_offd_data[j] *= factor;
}
}
return(0);
}
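/*--------------------------------------------------------------------------
* Usage sketch: row-scale a matrix by an inverse diagonal, as done when
* forming D_ff^{-1} * A_fc below (dinv, d, and n_local are assumptions
* here; the vector length must match the local number of rows):
*
*   for (i = 0; i < n_local; i++) { dinv[i] = 1.0 / d[i]; }
*   hypre_ParCSRMatrixLeftScale(dinv, A_fc);
*--------------------------------------------------------------------------*/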
/************************************************************
* Available methods:
* 0: inv(A_FF) approximated by its diagonal inverse
* 1: inv(A_FF) approximated by sparse approximate inverse
*************************************************************/
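/*--------------------------------------------------------------------------
* In either case the coarse grid is an approximate Schur complement,
*
*     A_h = A_cc - A_cf * approx(inv(A_FF)) * A_fc ,
*
* assembled below as A_h = A_cc + A_h_correction, with the minus sign
* carried inside the correction factors (see the -1/a_ff scaling for the
* diagonal path).
*--------------------------------------------------------------------------*/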
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
hypre_ParCSRMatrix *RT,
HYPRE_Int bsize,
HYPRE_Int ordering,
HYPRE_Int method,
HYPRE_Int Pmax,
HYPRE_Int keep_stencil,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix **A_h_ptr)
{
HYPRE_Int *c_marker, *f_marker;
HYPRE_Int n_local_fine_grid, i, i1, jj;
hypre_ParCSRMatrix *A_cc;
hypre_ParCSRMatrix *A_ff;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *A_cf;
hypre_ParCSRMatrix *A_h;
hypre_ParCSRMatrix *A_h_correction;
HYPRE_Int max_elmts = Pmax;
// HYPRE_Real wall_time = 0.;
hypre_ParCSRMatrix *P_mod = NULL;
HYPRE_Int my_id;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_MPI_Comm_rank(comm,&my_id);
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fine_grid; i++)
{
HYPRE_Int point_type = CF_marker[i];
hypre_assert(point_type == 1 || point_type == -1);
c_marker[i] = point_type;
f_marker[i] = -point_type;
}
// get the A_cc sub-block
hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);
if (method == 0)
{
if (keep_stencil)
{
//wall_time = time_getWallclockSeconds();
hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
// extract the diagonal of A_ff and compute D_ff_inv
hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff);
HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag);
HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag);
HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag);
HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag);
HYPRE_Real *D_ff_inv;
D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fpoints; i++)
{
for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++)
{
i1 = A_ff_diag_j[jj];
if ( i==i1 )
{
D_ff_inv[i] = -1.0/A_ff_diag_data[jj]; /* store -1/a_ff so the correction carries the Schur-complement minus sign */
}
}
}
// extract the diagonal of A_cf
hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf);
HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag);
HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag);
HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag);
n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag);
HYPRE_Real *D_cf;
D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fpoints; i++)
{
/* take the first stored entry of row i of A_cf as its (block-)diagonal
* coupling; empty rows contribute zero */
i1 = A_cf_diag_i[i];
D_cf[i] = (A_cf_diag_i[i+1] > i1) ? A_cf_diag_data[i1] : 0.0;
}
// compute the triple product
hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc);
hypre_ParCSRMatrixLeftScale(D_cf, A_fc);
A_h_correction = A_fc;
hypre_TFree(D_cf, HYPRE_MEMORY_HOST);
hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_cf);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time);
}
else
{
//wall_time = time_getWallclockSeconds();
P_mod = hypre_ParCSRMatrixCompleteClone(P);
hypre_ParCSRMatrixCopy(P,P_mod,1);
HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod);
hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod);
HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag);
HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);
for (i = 0; i < n_local_rows; i ++)
{
if (CF_marker[i] >= 0)
{
HYPRE_Int ii = P_mod_diag_i[i];
P_mod_diag_data[ii] = 0.0;
}
}
hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product time new: %1.5f\n", wall_time);
hypre_ParCSRMatrixDestroy(P_mod);
}
}
else
{
// Approximate inverse for ideal interpolation
hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
hypre_ParCSRMatrix *A_ff_inv = NULL;
hypre_ParCSRMatrix *minus_Wp = NULL;
hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
A_h_correction = hypre_ParMatmul(A_cf, minus_Wp);
hypre_ParCSRMatrixDestroy(minus_Wp);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(A_cf);
}
// perform dropping for A_h_correction
// specific to multiphase poromechanics
// we only keep the diagonal of each block
//wall_time = time_getWallclockSeconds();
HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction));
hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction);
HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag);
HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag);
hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction);
HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd);
if (Pmax > 0)
{
if (ordering == 0) // interleaved ordering
{
HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, memory_location);
HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, memory_location);
HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, memory_location);
HYPRE_Int num_nonzeros_diag_new = 0;
HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, memory_location);
HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, memory_location);
HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, memory_location);
HYPRE_Int num_nonzeros_offd_new = 0;
for (i = 0; i < n_local_cpoints; i++)
{
HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i];
HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);
HYPRE_Int row_start = i - (i % bsize);
HYPRE_Int row_stop = row_start + bsize - 1;
HYPRE_Int cnt = 0;
for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++)
{
aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
aux_data[cnt] = A_h_correction_offd_data[jj];
cnt++;
}
for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++)
{
aux_j[cnt] = A_h_correction_diag_j[jj];
aux_data[cnt] = A_h_correction_diag_data[jj];
cnt++;
}
hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1);
for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++)
{
i1 = A_h_correction_diag_j[jj];
if (i1 >= row_start && i1 <= row_stop)
{
// copy data to new arrays
A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
++num_nonzeros_diag_new;
}
else
{
// Do nothing
}
}
if (max_elmts > 0)
{
for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
{
HYPRE_Int col_idx = aux_j[jj];
HYPRE_Real col_value = aux_data[jj];
if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
{
A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
++num_nonzeros_diag_new;
}
else if (col_idx >= ncol_diag)
{
A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
++num_nonzeros_offd_new;
}
}
}
A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new;
A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new;
hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(A_h_correction_diag_i, memory_location);
hypre_TFree(A_h_correction_diag_j, memory_location);
hypre_TFree(A_h_correction_diag_data, memory_location);
hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;
if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, memory_location);
if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, memory_location);
if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, memory_location);
hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
}
else
{
hypre_printf("Error!! Block ordering for non-Galerkin coarse grid is not currently supported\n");
exit(-1);
}
}
//hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
//hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");
// Form the coarse grid (approximate Schur complement): A_h = A_cc + A_h_correction
hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
*A_h_ptr = A_h;
//hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");
hypre_ParCSRMatrixDestroy(A_cc);
hypre_ParCSRMatrixDestroy(A_h_correction);
hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
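/*--------------------------------------------------------------------------
* hypre_MGRComputeAlgebraicFixedStress
*
* Splits the unknowns into three contiguous blocks (U, S, P) using the
* global offsets in mgr_idx_array, extracts the corresponding sub-blocks
* of A, and evaluates the diagnostic vectors
*
*    e4 = A_su * A_uu^{-1} * (A_up * 1)   and
*    e5 = A_pu * A_uu^{-1} * (A_up * 1),
*
* where A_uu^{-1} is applied approximately by the supplied AMG solver
* (A_ff_solver). The results are written out via hypre_ParVectorPrintIJ.
*--------------------------------------------------------------------------*/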
HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
HYPRE_BigInt *mgr_idx_array,
HYPRE_Solver A_ff_solver)
{
HYPRE_Int *U_marker, *S_marker, *P_marker;
HYPRE_Int n_fine, i;
HYPRE_BigInt ibegin;
hypre_ParCSRMatrix *A_up;
hypre_ParCSRMatrix *A_uu;
hypre_ParCSRMatrix *A_su;
hypre_ParCSRMatrix *A_pu;
hypre_ParVector *e1_vector;
hypre_ParVector *e2_vector;
hypre_ParVector *e3_vector;
hypre_ParVector *e4_vector;
hypre_ParVector *e5_vector;
n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
hypre_assert(ibegin == mgr_idx_array[0]);
U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
U_marker[i] = -1;
S_marker[i] = -1;
P_marker[i] = -1;
}
// create U, S, and P markers
for (i = 0; i < n_fine; i++)
{
if (i < mgr_idx_array[1] - ibegin)
{
U_marker[i] = 1;
}
else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
{
S_marker[i] = 1;
}
else
{
P_marker[i] = 1;
}
}
// Get A_up
hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
// Get A_uu
hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
// Get A_su
hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su);
// Get A_pu
hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu);
e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up),
hypre_ParCSRMatrixGlobalNumCols(A_up),
hypre_ParCSRMatrixColStarts(A_up));
hypre_ParVectorInitialize(e1_vector);
hypre_ParVectorSetPartitioningOwner(e1_vector,0);
hypre_ParVectorSetConstantValues(e1_vector, 1.0);
e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
hypre_ParCSRMatrixGlobalNumRows(A_uu),
hypre_ParCSRMatrixRowStarts(A_uu));
hypre_ParVectorInitialize(e2_vector);
hypre_ParVectorSetPartitioningOwner(e2_vector,0);
hypre_ParVectorSetConstantValues(e2_vector, 0.0);
e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
hypre_ParCSRMatrixGlobalNumRows(A_uu),
hypre_ParCSRMatrixRowStarts(A_uu));
hypre_ParVectorInitialize(e3_vector);
hypre_ParVectorSetPartitioningOwner(e3_vector,0);
hypre_ParVectorSetConstantValues(e3_vector, 0.0);
e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su),
hypre_ParCSRMatrixGlobalNumRows(A_su),
hypre_ParCSRMatrixRowStarts(A_su));
hypre_ParVectorInitialize(e4_vector);
hypre_ParVectorSetPartitioningOwner(e4_vector,0);
hypre_ParVectorSetConstantValues(e4_vector, 0.0);
e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu),
hypre_ParCSRMatrixGlobalNumRows(A_pu),
hypre_ParCSRMatrixRowStarts(A_pu));
hypre_ParVectorInitialize(e5_vector);
hypre_ParVectorSetPartitioningOwner(e5_vector,0);
hypre_ParVectorSetConstantValues(e5_vector, 0.0);
// compute e2 = A_up * e1
hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector);
// solve e3 = A_uu^-1 * e2
hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector);
// compute e4 = A_su * e3
hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector);
// print e4
hypre_ParVectorPrintIJ(e4_vector,1,"Dsp");
// compute e5 = A_pu * e3
hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector);
hypre_ParVectorPrintIJ(e5_vector,1,"Dpp");
hypre_ParVectorDestroy(e1_vector);
hypre_ParVectorDestroy(e2_vector);
hypre_ParVectorDestroy(e3_vector);
hypre_ParVectorDestroy(e4_vector);
hypre_ParVectorDestroy(e5_vector);
hypre_ParCSRMatrixDestroy(A_uu);
hypre_ParCSRMatrixDestroy(A_up);
hypre_ParCSRMatrixDestroy(A_pu);
hypre_ParCSRMatrixDestroy(A_su);
hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
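/*--------------------------------------------------------------------------
* hypre_MGRApproximateInverse
*
* Computes a sparse approximate inverse of A via the Newton-Schulz-
* Hotelling driver hypre_ILUParCSRInverseNSH, which uses a Minimal
* Residual (MR) iteration to build the initial guess. The iteration
* counts and per-row fill limits below are deliberately small; the
* defaults noted in the inline comments give a more accurate (and more
* expensive) inverse.
*
* A minimal usage sketch (error checking omitted):
*
*    hypre_ParCSRMatrix *A_ff_inv = NULL;
*    hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
*    ... use A_ff_inv, e.g. hypre_ParMatmul(A_ff_inv, A_fc) ...
*    hypre_ParCSRMatrixDestroy(A_ff_inv);
*--------------------------------------------------------------------------*/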
HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **A_inv)
{
HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
HYPRE_Real mr_tol, nsh_tol;
HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrix *approx_A_inv = NULL;
print_level = 0;
nsh_max_iter = 2;
nsh_max_row_nnz = 2; // default 1000
mr_max_iter = 1;
mr_tol = 1.0e-3;
mr_max_row_nnz = 2; // default 800
mr_col_version = 0;
nsh_tol = 1.0e-3;
droptol[0] = 1.0e-2;
droptol[1] = 1.0e-2;
hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz,
nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level);
*A_inv = approx_A_inv;
if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
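/*--------------------------------------------------------------------------
* hypre_MGRBuildInterpApproximateInverseExp
*
* Builds the MGR interpolation operator
*
*    P = [ Wp ]   with   Wp = -(S * A_fc),
*        [ I  ]
*
* where S is a user-supplied approximation to A_ff^{-1} and A_fc is the
* fine-to-coarse coupling block of A. C-point rows of P are identity
* rows; each F-point row is the corresponding row of -(S * A_fc).
*--------------------------------------------------------------------------*/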
HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *S,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
HYPRE_Int *C_marker;
HYPRE_Int *F_marker;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *minus_Wp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int coarse_counter;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
// HYPRE_BigInt my_first_cpt;
HYPRE_Int i, jj;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
// HYPRE_Int num_threads;
// HYPRE_Real wall_time; /* for debugging instrumentation */
C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
// create C and F markers
for (i = 0; i < n_fine; i++)
{
C_marker[i] = (CF_marker[i] == 1)? 1: -1;
F_marker[i] = (CF_marker[i] == 1) ? -1: 1;
}
// Get A_FC
hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
// compute -Wp
minus_Wp = hypre_ParMatmul(S, A_fc);
hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// num_threads = hypre_NumThreads();
// my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
HYPRE_Int row_counter = 0;
coarse_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
else
{
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
jj_counter++;
}
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
jj_counter_offd++;
}
}
row_counter++;
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Second pass: fill in the entries of P.
*-----------------------------------------------------------------------*/
row_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
jj_counter++;
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
jj_counter_offd++;
}
}
row_counter++;
}
P_offd_i[i+1] = jj_counter_offd;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
if (num_cols_P_offd)
{
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_P_offd; i++)
{
col_map_offd_P[i] = col_map_offd_tmp[i];
}
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(minus_Wp);
return hypre_error_flag;
}
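/*--------------------------------------------------------------------------
* hypre_MGRBuildInterpApproximateInverse
*
* Same construction as hypre_MGRBuildInterpApproximateInverseExp above,
* except that the approximation to A_ff^{-1} is computed internally with
* hypre_MGRApproximateInverse instead of being passed in: F-point rows
* of P are rows of -(A_ff_inv * A_fc), C-point rows are identity rows.
*--------------------------------------------------------------------------*/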
HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
HYPRE_Int *C_marker;
HYPRE_Int *F_marker;
hypre_ParCSRMatrix *A_ff;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *A_ff_inv;
hypre_ParCSRMatrix *minus_Wp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
//HYPRE_Int jj_begin_row,jj_begin_row_offd;
//HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *fine_to_coarse = NULL;
//HYPRE_Int *coarse_counter;
HYPRE_Int coarse_counter;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
// HYPRE_BigInt my_first_cpt;
HYPRE_Int i,jj;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
// HYPRE_Int num_threads;
// HYPRE_Real wall_time; /* for debugging instrumentation */
C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
// create C and F markers
for (i = 0; i < n_fine; i++)
{
C_marker[i] = (CF_marker[i] == 1)? 1: -1;
F_marker[i] = (CF_marker[i] == 1) ? -1: 1;
}
// Get A_FF
hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
// hypre_ParCSRMatrixPrintIJ(A_ff, 1, 1, "A_ff");
// Get A_FC
hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
// hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
// hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");
minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
// hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp");
hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
//hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
//HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// num_threads = hypre_NumThreads();
// my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
//coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
//jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
//jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
HYPRE_Int row_counter = 0;
coarse_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
//jj_count[j]++;
//fine_to_coarse[i] = coarse_counter[j];
//coarse_counter[j]++;
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
else
{
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
//jj_count[j]++;
jj_counter++;
}
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
//jj_count_offd[j]++;
jj_counter_offd++;
}
}
row_counter++;
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
/*
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
*/
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
*/
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
/*
if (num_procs > 1)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] += my_first_cpt;
}
comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(minus_Wp);
comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
}
*/
row_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
//P_marker[row_counter] = jj_counter;
P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
jj_counter++;
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
//P_marker_offd[row_counter] = jj_counter_offd;
P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
jj_counter_offd++;
}
}
row_counter++;
}
P_offd_i[i+1] = jj_counter_offd;
}
//hypre_printf("Num rows of Wp = %d\n", row_counter);
//P_offd_i[row_counter] = jj_counter_offd;
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
if (num_cols_P_offd)
{
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
{
col_map_offd_P[i] = col_map_offd_tmp[i];
}
}
/*
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_minus_Wp_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
*/
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
//hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
//hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
//hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
//hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
//hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
//hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(A_ff_inv);
hypre_ParCSRMatrixDestroy(minus_Wp);
return hypre_error_flag;
}
/* Setup interpolation operator */
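/*
* interp_type selects the construction:
*    < 3 : hypre_MGRBuildP variants
*     4  : approximate-inverse interpolation (internal A_ff^{-1})
*    99  : approximate-inverse interpolation with user-supplied S
*   else : classical modified interpolation (hypre_BoomerAMGBuildInterp)
*/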
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
hypre_ParCSRMatrix **P,
HYPRE_Int interp_type,
HYPRE_Int numsweeps)
{
// HYPRE_Int i;
hypre_ParCSRMatrix *P_ptr = NULL;
// HYPRE_Real jac_trunc_threshold = trunc_factor;
// HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
/* Interpolation for each level */
if (interp_type <3)
{
hypre_MGRBuildP( A,CF_marker,num_cpts_global,interp_type,debug_flag,&P_ptr);
/* Could do a few sweeps of Jacobi to further improve Jacobi interpolation P */
/*
if(interp_type == 2)
{
for(i=0; i<numsweeps; i++)
{
hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
}
hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
}
*/
}
else if (interp_type == 4)
{
hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr);
hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
}
else if (interp_type == 99)
{
hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr);
hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
}
else
{
/* Classical modified interpolation */
hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag,
trunc_factor, max_elmts, &P_ptr);
}
/* set pointer to P */
*P = P_ptr;
return hypre_error_flag;
}
/* Setup restriction operator */
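/*
* restrict_type selects the construction:
*     0  : hypre_MGRBuildP applied to A
*   1,2  : hypre_MGRBuildP applied to A^T
*     3  : approximate-inverse interpolation built from A^T
*   else : classical modified interpolation on A^T with a rebuilt
*          strength matrix
*/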
HYPRE_Int
hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
hypre_ParCSRMatrix **R,
HYPRE_Int restrict_type,
HYPRE_Int numsweeps)
{
// HYPRE_Int i;
hypre_ParCSRMatrix *R_ptr = NULL;
hypre_ParCSRMatrix *AT = NULL;
hypre_ParCSRMatrix *ST = NULL;
// HYPRE_Real jac_trunc_threshold = trunc_factor;
// HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
/* Build AT (transpose A) */
if (restrict_type > 0)
{
hypre_ParCSRMatrixTranspose(A, &AT, 1);
}
/* Restriction for each level */
if (restrict_type == 0)
{
hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
}
else if (restrict_type == 1 || restrict_type == 2)
{
hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
}
else if (restrict_type == 3)
{
hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr);
hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
}
else
{
/* Build new strength matrix */
hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST);
/* Classical modified interpolation */
hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag,
trunc_factor, max_elmts, &R_ptr);
}
/* set pointer to R */
*R = R_ptr;
/* Free memory */
if (restrict_type > 0)
{
hypre_ParCSRMatrixDestroy(AT);
}
if (ST)
{
hypre_ParCSRMatrixDestroy(ST);
}
return hypre_error_flag;
}
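/*--------------------------------------------------------------------------
* hypre_blas_smat_inv_n4
*
* In-place inverse of a dense 4x4 matrix stored row-major in a[16].
* The M_ij below are the cofactors of A (the adjugate method), and det
* is the first-row cofactor expansion
* det = a11*M11 + a12*M21 + a13*M31 + a14*M41.
*
* A minimal usage sketch (hypothetical values):
*
*    HYPRE_Real a[16] = { 4, 0, 0, 0,
*                         0, 3, 0, 0,
*                         0, 0, 2, 0,
*                         0, 0, 0, 1 };
*    hypre_blas_smat_inv_n4(a);  // a now holds diag(0.25, 1/3, 0.5, 1)
*--------------------------------------------------------------------------*/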
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3];
const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7];
const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11];
const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];
const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;
const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41;
HYPRE_Real det_inv;
// Note: no singularity check is performed here; if det is (near) zero,
// the entries computed below contain Inf/NaN.
det_inv = 1.0/det;
a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv;
a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv;
a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv;
a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv;
}
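/*--------------------------------------------------------------------------
* hypre_blas_mat_inv
*
* In-place inverse of a dense n x n matrix stored row-major in a[],
* computed by Gauss-Jordan elimination without pivoting (n == 4 is
* dispatched to the closed-form routine above). Since no pivoting is
* performed, the caller must ensure the pivots a[k*n+k] stay away from
* zero.
*--------------------------------------------------------------------------*/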
void hypre_blas_mat_inv(HYPRE_Real *a,
HYPRE_Int n)
{
HYPRE_Int i,j,k,l,u,kn,in;
HYPRE_Real alinv;
if (n == 4)
{
hypre_blas_smat_inv_n4(a);
}
else
{
for (k=0; k<n; ++k) {
kn = k*n;
l = kn+k;
//if (fabs(a[l]) < SMALLREAL) {
// printf("### WARNING: Diagonal entry is close to zero!");
// printf("### WARNING: diag_%d=%e\n", k, a[l]);
// a[l] = SMALLREAL;
//}
alinv = 1.0/a[l];
a[l] = alinv;
for (j=0; j<k; ++j) {
u = kn+j; a[u] *= alinv;
}
for (j=k+1; j<n; ++j) {
u = kn+j; a[u] *= alinv;
}
for (i=0; i<k; ++i) {
in = i*n;
for (j=0; j<n; ++j)
if (j!=k) {
u = in+j; a[u] -= a[in+k]*a[kn+j];
} // end if (j!=k)
}
for (i=k+1; i<n; ++i) {
in = i*n;
for (j=0; j<n; ++j)
if (j!=k) {
u = in+j; a[u] -= a[in+k]*a[kn+j];
} // end if (j!=k)
}
for (i=0; i<k; ++i) {
u=i*n+k; a[u] *= -alinv;
}
for (i=k+1; i<n; ++i) {
u=i*n+k; a[u] *= -alinv;
}
} // end for (k=0; k<n; ++k)
}// end if
}
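/*--------------------------------------------------------------------------
* hypre_block_jacobi_scaling
*
* Builds the block-diagonal scaling matrix B = diag(D_1^{-1}, ...,
* D_nblock^{-1}) as a ParCSR matrix, where D_i is the i-th
* blk_size x blk_size diagonal sub-block of the local diagonal part of
* A. Only the diagonal part of B is populated; the off-diagonal part is
* left empty, and rows belonging to an incomplete trailing block
* (left_size > 0) are not handled here.
*--------------------------------------------------------------------------*/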
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr,
void *mgr_vdata, HYPRE_Int debug_flag)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int num_procs, my_id;
HYPRE_Int blk_size = (mgr_data -> block_size);
HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_ParCSRMatrix *B;
hypre_CSRMatrix *B_diag;
HYPRE_Real *B_diag_data;
HYPRE_Int *B_diag_i;
HYPRE_Int *B_diag_j;
hypre_CSRMatrix *B_offd;
HYPRE_Int i,ii;
HYPRE_Int j,jj;
HYPRE_Int k;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int n_block, left_size,inv_size;
// HYPRE_Real wall_time; /* for debugging instrumentation */
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Real * diaginv;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int block_scaling_error = 0;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
//printf("n = %d\n",n);
if (my_id == (num_procs - 1)) // the last rank holds the reserved coarse points
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
//printf("inv_size = %d\n",inv_size);
hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv));
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
* First Pass: Determine size of B and fill in
*-----------------------------------------------------------------------*/
B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
B_diag_i[n] = inv_size;
//B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
//B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
//B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
//B_offd_i[n] = 1;
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
//printf("n_block = %d\n",n_block);
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
/* for (k = 0;k < blk_size; k++) */
/* { */
/* for (j = 0;j < blk_size; j++) */
/* { */
/* bidx = k*blk_size + j; */
/* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
/* } */
/* } */
hypre_blas_mat_inv(diaginv, blk_size);
for (k = 0;k < blk_size; k++)
{
B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size;
//B_offd_i[i*nb2+k] = 0;
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
B_diag_j[bidx] = i*blk_size + j;
B_diag_data[bidx] = diaginv[k*blk_size + j];
}
}
}
//printf("Before create\n");
B = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
0,
inv_size,
0);
//printf("After create\n");
B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrixData(B_diag) = B_diag_data;
hypre_CSRMatrixI(B_diag) = B_diag_i;
hypre_CSRMatrixJ(B_diag) = B_diag_j;
B_offd = hypre_ParCSRMatrixOffd(B);
hypre_CSRMatrixData(B_offd) = NULL;
hypre_CSRMatrixI(B_offd) = NULL;
hypre_CSRMatrixJ(B_offd) = NULL;
/* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */
*B_ptr = B;
return(block_scaling_error);
}
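/*--------------------------------------------------------------------------
* hypre_blockRelax_solve
*
* One sweep of block relaxation: for each block i of size blk_size, the
* local residual r_i = (f - A*u)_i is formed and the update
*
*    u_i <- u_i + D_i^{-1} r_i
*
* is applied, where D_i^{-1} is the precomputed dense inverse stored at
* diaginv + i*blk_size*blk_size. method == 0 evaluates the residual with
* the previous iterate (block Jacobi); method == 1 uses the most recent
* values (block Gauss-Seidel). Off-process couplings are always treated
* Jacobi-style through Vext_data.
*--------------------------------------------------------------------------*/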
HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int blk_size,
HYPRE_Int n_block,
HYPRE_Int left_size,
HYPRE_Int method,
HYPRE_Real *diaginv,
hypre_ParVector *Vtemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j, k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidx1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id;
HYPRE_Real *res;
const HYPRE_Int nb2 = blk_size*blk_size;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
//printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax points block by block
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*blk_size +j;
res[j] = f_data[bidx];
for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
{
ii = A_diag_j[jj];
if (method == 0)
{
// Jacobi for diagonal part
res[j] -= A_diag_data[jj] * Vtemp_data[ii];
}
else if (method == 1)
{
// Gauss-Seidel for diagonal part
res[j] -= A_diag_data[jj] * u_data[ii];
}
else
{
// Default do Jacobi for diagonal part
res[j] -= A_diag_data[jj] * Vtemp_data[ii];
}
//printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
}
for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
{
// always do Jacobi for off-diagonal part
ii = A_offd_j[jj];
res[j] -= A_offd_data[jj] * Vext_data[ii];
}
//printf("%d: res = %e\n",bidx,res[j]);
}
for (j = 0;j < blk_size; j++)
{
bidx1 = i*blk_size +j;
for (k = 0;k < blk_size; k++)
{
bidx = i*nb2 +j*blk_size+k;
u_data[bidx1] += res[k]*diaginv[bidx];
//printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
}
//printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(res, HYPRE_MEMORY_HOST);
return(relax_error);
}
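/*--------------------------------------------------------------------------
* hypre_block_gs
*
* Block Gauss-Seidel sweep; structurally identical to
* hypre_blockRelax_solve with method == 1 (the local residual always
* uses the most recently updated values of u).
*--------------------------------------------------------------------------*/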
HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int blk_size,
HYPRE_Int n_block,
HYPRE_Int left_size,
HYPRE_Real *diaginv,
hypre_ParVector *Vtemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j, k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidx1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id;
HYPRE_Real *res;
const HYPRE_Int nb2 = blk_size*blk_size;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//HYPRE_Int num_threads = hypre_NumThreads();
res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
//printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax points block by block
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*blk_size +j;
res[j] = f_data[bidx];
for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
{
ii = A_diag_j[jj];
//res[j] -= A_diag_data[jj] * Vtemp_data[ii];
//printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]);
res[j] -= A_diag_data[jj] * u_data[ii];
//printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
}
for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
{
ii = A_offd_j[jj];
res[j] -= A_offd_data[jj] * Vext_data[ii];
}
//printf("%d: res = %e\n",bidx,res[j]);
}
for (j = 0;j < blk_size; j++)
{
bidx1 = i*blk_size +j;
for (k = 0;k < blk_size; k++)
{
bidx = i*nb2 +j*blk_size+k;
u_data[bidx1] += res[k]*diaginv[bidx];
//printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
}
//printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(res, HYPRE_MEMORY_HOST);
return(relax_error);
}
/* Block smoother */
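/*--------------------------------------------------------------------------
* hypre_blockRelax_setup
*
* Extracts the blk_size x blk_size diagonal sub-blocks of A (plus one
* trailing left_size x left_size block for rows that do not fill a
* complete block), inverts each with hypre_blas_mat_inv, and returns the
* inverses packed contiguously in *diaginvptr: block i starts at offset
* i*blk_size^2 and the trailing block at offset n_block*blk_size^2.
*--------------------------------------------------------------------------*/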
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Real **diaginvptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int i, j,k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Int num_procs, my_id;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int n_block;
HYPRE_Int left_size,inv_size;
HYPRE_Real *diaginv = *diaginvptr;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//HYPRE_Int num_threads = hypre_NumThreads();
if (my_id == (num_procs - 1)) // the last rank holds the reserved coarse points
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
if (diaginv != NULL)
{
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
}
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
//printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = i*nb2 + k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
}
for (i = 0; i < left_size; i++)
{
// the trailing (left_size x left_size) block is stored densely,
// row-major, starting at diaginv + n_block*nb2
for (j = 0; j < left_size; j++)
{
bidx = n_block*nb2 + i*left_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size + i + 1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= n_block*blk_size)
{
bidx = n_block*nb2 + i*left_size + (jj - n_block*blk_size);
diaginv[bidx] = A_diag_data[ii];
}
}
}
/*-----------------------------------------------------------------
* compute the inverses of all the diagonal sub-blocks
*-----------------------------------------------------------------*/
if (blk_size > 1)
{
for (i = 0;i < n_block; i++)
{
hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
}
hypre_blas_mat_inv(diaginv + n_block*nb2, left_size);
}
else
{
for (i = 0;i < n; i++)
{
// FIX-ME: zero-diagonal should be tested previously
if (fabs(diaginv[i]) < SMALLREAL)
diaginv[i] = 0.0;
else
diaginv[i] = 1.0 / diaginv[i];
}
}
*diaginvptr = diaginv;
return hypre_error_flag;
}
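/*--------------------------------------------------------------------------
* hypre_blockRelax
*
* Convenience driver: extracts and inverts the diagonal sub-blocks of A
* (as in hypre_blockRelax_setup, but into a local buffer), performs one
* block relaxation sweep with hypre_blockRelax_solve, and frees the
* buffer. A minimal usage sketch (Vtemp is a work vector compatible with
* u; Ztemp is currently unused):
*
*    hypre_blockRelax(A, f, u, blk_size, reserved_coarse_size,
*                     0, Vtemp, Ztemp);  // method 0 = block Jacobi
*--------------------------------------------------------------------------*/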
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Int method,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int i, j,k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_procs, my_id;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int n_block;
HYPRE_Int left_size,inv_size;
HYPRE_Real *diaginv;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//HYPRE_Int num_threads = hypre_NumThreads();
if (my_id == (num_procs - 1)) // the last rank holds the reserved coarse points
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
//printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = i*nb2 + k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
}
for (i = 0; i < left_size; i++)
{
// the trailing (left_size x left_size) block is stored densely,
// row-major, starting at diaginv + n_block*nb2
for (j = 0; j < left_size; j++)
{
bidx = n_block*nb2 + i*left_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size + i + 1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= n_block*blk_size)
{
bidx = n_block*nb2 + i*left_size + (jj - n_block*blk_size);
diaginv[bidx] = A_diag_data[ii];
}
}
}
/*
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
for (k = 0;k < blk_size; k ++)
{
bidx = i*nb2 + j*blk_size + k;
printf("%e\t",diaginv[bidx]);
}
printf("\n");
}
printf("\n");
}
*/
/*-----------------------------------------------------------------
* compute the inverses of all the diagonal sub-blocks
*-----------------------------------------------------------------*/
if (blk_size > 1)
{
for (i = 0;i < n_block; i++)
{
hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
}
hypre_blas_mat_inv(diaginv + n_block*nb2, left_size);
/*
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
for (k = 0;k < blk_size; k ++)
{
bidx = i*nb2 + j*blk_size + k;
printf("%e\t",diaginv[bidx]);
}
printf("\n");
}
printf("\n");
}
*/
}
else
{
for (i = 0;i < n; i++)
{
// FIX-ME: zero-diagonal should be tested previously
if (fabs(diaginv[i]) < SMALLREAL)
diaginv[i] = 0.0;
else
diaginv[i] = 1.0 / diaginv[i];
}
}
hypre_blockRelax_solve(A,f,u,blk_size,n_block,left_size,method,diaginv,Vtemp);
/*-----------------------------------------------------------------
* Free temporary memory
*-----------------------------------------------------------------*/
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
return(relax_error);
}
/* set fine grid (F-relaxation) solver */
HYPRE_Int
hypre_MGRSetFSolver( void *mgr_vdata,
HYPRE_Int (*fine_grid_solver_solve)(void*,void*,void*,void*),
HYPRE_Int (*fine_grid_solver_setup)(void*,void*,void*,void*),
void *fsolver )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
HYPRE_Solver **aff_solver = (mgr_data -> aff_solver);
if (aff_solver == NULL)
aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST);
/* only allow setting the F-solver for the first level */
aff_solver[0] = (HYPRE_Solver *) fsolver;
(mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve;
(mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup;
(mgr_data -> aff_solver) = aff_solver;
(mgr_data -> use_default_fsolver) = 0;
return hypre_error_flag;
}
/* set coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseSolver( void *mgr_vdata,
HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*),
HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*),
void *coarse_grid_solver )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
(mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
(mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
(mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver;
(mgr_data -> use_default_cgrid_solver) = 0;
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRSetAffInv( void *mgr_vdata,
hypre_ParCSRMatrix *A_ff_inv )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> A_ff_inv) = A_ff_inv;
return hypre_error_flag;
}
/* Set the maximum number of coarse levels.
* maxcoarselevs = 1 yields the default 2-grid scheme.
*/
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> max_num_coarse_levels) = maxcoarselevs;
return hypre_error_flag;
}
/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> block_size) = bsize;
return hypre_error_flag;
}
/* Set the relaxation type for the fine levels of the reduction.
* Currently supports relaxation types 0-8, 13, 14, 18, 19, and 98,
* as described in the documentation.
* See par_relax.c and par_relax_more.c for more details.
*/
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> relax_type) = relax_type;
return hypre_error_flag;
}
/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_relax_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set the F-relaxation strategy: 0=single level, 1=multi level */
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> Frelax_method) != NULL) {
hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_method) = NULL;
}
HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
for (i=0; i < max_num_coarse_levels; i++)
{
Frelax_method[i] = relax_method;
}
(mgr_data -> Frelax_method) = Frelax_method;
return hypre_error_flag;
}
/* Set the F-relaxation strategy: 0=single level, 1=multi level */
HYPRE_Int
hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> Frelax_method) != NULL) {
hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_method) = NULL;
}
HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (relax_method != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
Frelax_method[i] = relax_method[i];
}
}
else
{
for (i = 0; i < max_num_coarse_levels; i++)
{
Frelax_method[i] = 0;
}
}
(mgr_data -> Frelax_method) = Frelax_method;
return hypre_error_flag;
}
/* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/
HYPRE_Int
hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> use_non_galerkin_cg) != NULL) {
hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
(mgr_data -> use_non_galerkin_cg) = NULL;
}
HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (cg_method != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
use_non_galerkin_cg[i] = cg_method[i];
}
}
else
{
for (i = 0; i < max_num_coarse_levels; i++)
{
use_non_galerkin_cg[i] = 0;
}
}
(mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg;
return hypre_error_flag;
}
/* Set the F-relaxation number of functions for each level */
HYPRE_Int
hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> Frelax_num_functions) != NULL) {
hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_num_functions) = NULL;
}
HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (num_functions != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
Frelax_num_functions[i] = num_functions[i];
}
}
else
{
for (i = 0; i < max_num_coarse_levels; i++)
{
Frelax_num_functions[i] = 1;
}
}
(mgr_data -> Frelax_num_functions) = Frelax_num_functions;
return hypre_error_flag;
}
/* Set the restriction type used to compute the restriction
* operator, per level
*/
HYPRE_Int
hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> restrict_type) != NULL)
{
hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
(mgr_data -> restrict_type) = NULL;
}
HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (restrict_type != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_restrict_type[i] = *(restrict_type + i);
}
}
else
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_restrict_type[i] = 0;
}
}
(mgr_data -> restrict_type) = level_restrict_type;
return hypre_error_flag;
}
/* Set the restriction type (same for all levels),
 * used when computing the restriction operator
 */
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> restrict_type) != NULL)
{
hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
(mgr_data -> restrict_type) = NULL;
}
HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
for (i=0; i < max_num_coarse_levels; i++)
{
level_restrict_type[i] = restrict_type;
}
(mgr_data -> restrict_type) = level_restrict_type;
return hypre_error_flag;
}
/* Set the number of sweeps used
 * when computing the restriction operator
 */
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_restrict_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set the interpolation type (same for all levels),
 * used when computing the interpolation operator
 */
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> interp_type) != NULL)
{
hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
(mgr_data -> interp_type) = NULL;
}
HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
for (i=0; i < max_num_coarse_levels; i++)
{
level_interp_type[i] = interpType;
}
(mgr_data -> interp_type) = level_interp_type;
return hypre_error_flag;
}
/* Set the interpolation type for each level,
 * used when computing the interpolation operator
 */
HYPRE_Int
hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> interp_type) != NULL)
{
hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
(mgr_data -> interp_type) = NULL;
}
HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (interpType != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_interp_type[i] = *(interpType + i);
}
}
else
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_interp_type[i] = 2;
}
}
(mgr_data -> interp_type) = level_interp_type;
return hypre_error_flag;
}
/* Set the number of Jacobi iterations used
 * when computing the interpolation operator
 */
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_interp_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set the threshold to truncate the coarse grid at each
* level of reduction
*/
HYPRE_Int
hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> truncate_coarse_grid_threshold) = threshold;
return hypre_error_flag;
}
/* Set print level for F-relaxation solver */
HYPRE_Int
hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> frelax_print_level) = print_level;
return hypre_error_flag;
}
/* Set print level for coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> cg_print_level) = print_level;
return hypre_error_flag;
}
/* Set print level for mgr solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> print_level) = print_level;
return hypre_error_flag;
}
/* Set logging level for mgr solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> logging) = logging;
return hypre_error_flag;
}
/* Set max number of iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> max_iter) = max_iter;
return hypre_error_flag;
}
/* Set convergence tolerance for mgr solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> tol) = tol;
return hypre_error_flag;
}
/* Set max number of iterations for mgr global smoother */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> global_smooth_iters) = max_iter;
return hypre_error_flag;
}
/* Set global smoothing type for mgr solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> global_smooth_type) = iter_type;
return hypre_error_flag;
}
/* Set the maximum number of nonzero entries for the restriction
and interpolation operators when classical AMG interpolation is used */
HYPRE_Int
hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> P_max_elmts) = P_max_elmts;
return hypre_error_flag;
}
/* Get number of iterations for MGR solver */
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_iterations = mgr_data->num_iterations;
return hypre_error_flag;
}
/* Get the final relative residual norm for the MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*res_norm = mgr_data->final_rel_residual_norm;
return hypre_error_flag;
}
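/* Get the coarse grid convergence factor for MGR solver */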
HYPRE_Int
hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*conv_factor = (mgr_data -> cg_convergence_factor);
return hypre_error_flag;
}
/* Extract a sub-block matrix from A given row and column CF_marker arrays */
HYPRE_Int
hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A,
HYPRE_Int *row_cf_marker,
HYPRE_Int *col_cf_marker,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **A_block_ptr )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
//HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *coarse_dof_func_ptr = NULL;
HYPRE_BigInt *num_row_cpts_global = NULL;
HYPRE_BigInt *num_col_cpts_global = NULL;
hypre_ParCSRMatrix *Ablock;
HYPRE_BigInt *col_map_offd_Ablock;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *Ablock_diag;
hypre_CSRMatrix *Ablock_offd;
HYPRE_Real *Ablock_diag_data;
HYPRE_Int *Ablock_diag_i;
HYPRE_Int *Ablock_diag_j;
HYPRE_Real *Ablock_offd_data;
HYPRE_Int *Ablock_offd_i;
HYPRE_Int *Ablock_offd_j;
HYPRE_Int Ablock_diag_size, Ablock_offd_size;
HYPRE_Int *Ablock_marker;
HYPRE_Int ii_counter;
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int start_indexing = 0; /* start indexing for Ablock data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
HYPRE_Int *col_coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_row_cpts;
HYPRE_BigInt total_global_col_cpts;
HYPRE_Int num_cols_Ablock_offd;
// HYPRE_BigInt my_first_row_cpt, my_first_col_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
// HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
/* get the number of coarse rows */
hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global);
hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST);
coarse_dof_func_ptr = NULL;
//hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]);
// my_first_row_cpt = num_row_cpts_global[0];
if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1];
hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/* get the number of coarse columns */
hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global);
hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST);
coarse_dof_func_ptr = NULL;
//hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]);
// my_first_col_cpt = num_col_cpts_global[0];
if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1];
hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/*-----------------------------------------------------------------------
* First Pass: Determine size of Ablock and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
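/* Example of the chunking above: with n_fine = 10 and num_threads = 3,
 * size = 3 and rest = 1, so thread 0 gets rows [0,4), thread 1 gets
 * [4,7), and thread 2 gets [7,10); the first 'rest' threads each take
 * one extra row. */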
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If row/column i is selected (marker > 0), count it and the selected
 * columns in its row. Also set up the fine-to-coarse mapping vector
 * for the selected columns.
 *--------------------------------------------------------------------*/
if (col_cf_marker[i] > 0)
{
fine_to_coarse[i] = col_coarse_counter[j];
col_coarse_counter[j]++;
}
if (row_cf_marker[i] > 0)
{
//fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (col_cf_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
coarse_counter[i+1] += coarse_counter[i];
col_coarse_counter[i+1] += col_coarse_counter[i];
}
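/* After this scan each counter holds an inclusive prefix sum over the
 * threads, so the last entries give the total diag/offd nonzero counts
 * and the total number of selected rows/columns. */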
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
ii_counter = coarse_counter[i];
Ablock_diag_size = jj_counter;
Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, memory_location);
Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location);
Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location);
Ablock_diag_i[ii_counter] = jj_counter;
Ablock_offd_size = jj_counter_offd;
Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, memory_location);
Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location);
Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location);
/*-----------------------------------------------------------------------
* Initialize counters.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
//-----------------------------------------------------------------------
// Send and receive fine_to_coarse info.
//-----------------------------------------------------------------------
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = col_coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
// for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
ii_counter = 0;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If row i is selected (marker > 0), copy the entries in its selected
 * columns into Ablock, using the fine-to-coarse mapping for the
 * column indices.
 *--------------------------------------------------------------------*/
if (row_cf_marker[i] > 0)
{
// Diagonal part of Ablock //
Ablock_diag_i[ii_counter] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (col_cf_marker[i1] > 0)
{
Ablock_diag_j[jj_counter] = fine_to_coarse[i1];
Ablock_diag_data[jj_counter] = A_diag_data[jj];
jj_counter++;
}
}
// Off-Diagonal part of Ablock //
Ablock_offd_i[ii_counter] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
Ablock_offd_j[jj_counter_offd] = i1;
Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];
jj_counter_offd++;
}
}
}
ii_counter++;
}
}
Ablock_offd_i[ii_counter] = jj_counter_offd;
Ablock_diag_i[ii_counter] = jj_counter;
}
Ablock = hypre_ParCSRMatrixCreate(comm,
total_global_row_cpts,
total_global_col_cpts,
num_row_cpts_global,
num_col_cpts_global,
0,
Ablock_diag_i[ii_counter],
Ablock_offd_i[ii_counter]);
Ablock_diag = hypre_ParCSRMatrixDiag(Ablock);
hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data;
hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i;
hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j;
Ablock_offd = hypre_ParCSRMatrixOffd(Ablock);
hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data;
hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i;
hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j;
num_cols_Ablock_offd = 0;
if (Ablock_offd_size)
{
Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
Ablock_marker[i] = 0;
num_cols_Ablock_offd = 0;
for (i=0; i < Ablock_offd_size; i++)
{
index = Ablock_offd_j[i];
if (!Ablock_marker[index])
{
num_cols_Ablock_offd++;
Ablock_marker[index] = 1;
}
}
col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_Ablock_offd; i++)
{
while (Ablock_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < Ablock_offd_size; i++)
Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
Ablock_offd_j[i],
num_cols_Ablock_offd);
hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_Ablock_offd)
{
hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
}
hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);
/* Create the assumed partition */
if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
}
*A_block_ptr= Ablock;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return(0);
}
/* Build A_FF matrix from A given a CF_marker array */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int i;
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* create a copy of the CF_marker array and switch C-points to F-points */
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < local_numrows; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr);
/* Free copy of CF marker */
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return(0);
}
/*********************************************************************************
* This routine assumes that the 'toVector' is larger than the 'fromVector' and
* the CF_marker is of the same length as the toVector. There must be n 'point_type'
* values in the CF_marker, where n is the length of the 'fromVector'.
* It adds the values of the 'fromVector' to the 'toVector' where the marker is the
* same as the 'point_type'
*********************************************************************************/
HYPRE_Int
hypre_MGRAddVectorP ( HYPRE_Int *CF_marker,
HYPRE_Int point_type,
HYPRE_Real a,
hypre_ParVector *fromVector,
HYPRE_Real b,
hypre_ParVector **toVector )
{
hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal);
hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector);
HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal);
HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector);
HYPRE_Int i, j;
j = 0;
for (i = 0; i < n; i++) {
if (CF_marker[i] == point_type) {
toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j];
j++;
}
}
return 0;
}
/*************************************************************************************
* This routine assumes that the 'fromVector' is larger than the 'toVector' and
* the CF_marker is of the same length as the fromVector. There must be n 'point_type'
* values in the CF_marker, where n is the length of the 'toVector'.
* It adds the values of the 'fromVector' where the marker is the
* same as the 'point_type' to the 'toVector'
*************************************************************************************/
HYPRE_Int
hypre_MGRAddVectorR ( HYPRE_Int *CF_marker,
HYPRE_Int point_type,
HYPRE_Real a,
hypre_ParVector *fromVector,
HYPRE_Real b,
hypre_ParVector **toVector )
{
hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal);
hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector);
HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal);
HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector);
HYPRE_Int i, j;
j = 0;
for (i = 0; i < n; i++) {
if (CF_marker[i] == point_type) {
toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i];
j++;
}
}
return 0;
}
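/* Worked example of the two routines above (illustrative values only):
 * with CF_marker = {1, -1, 1} and point_type = 1,
 * hypre_MGRAddVectorP scatters a length-2 fromVector into a length-3
 * toVector: to[0] = b*to[0] + a*from[0], to[2] = b*to[2] + a*from[1];
 * hypre_MGRAddVectorR gathers a length-3 fromVector into a length-2
 * toVector: to[0] = b*to[0] + a*from[0], to[1] = b*to[1] + a*from[2]. */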
/*
HYPRE_Int
hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr,
hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
HYPRE_Int i;
for (i = 0; i < local_num_variables; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr);
hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return 0;
}
*/
/* Get pointer to coarse grid matrix for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> RAP == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
return hypre_error_flag;
}
*RAP = mgr_data->RAP;
return hypre_error_flag;
}
/* Get pointer to coarse grid solution for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> U_array == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
return hypre_error_flag;
}
*sol = mgr_data->U_array[mgr_data->num_coarse_levels];
return hypre_error_flag;
}
/* Get pointer to coarse grid right-hand side for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> F_array == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
return hypre_error_flag;
}
*rhs = mgr_data->F_array[mgr_data->num_coarse_levels];
return hypre_error_flag;
}
/* Set flag to print the coarse grid linear system (for debugging) */
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
mgr_data->print_coarse_system = print_flag;
return hypre_error_flag;
}
/* Print solver params */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i, j;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
hypre_printf("MGR Setup parameters: \n");
hypre_printf("Block size: %d\n", (mgr_data -> block_size));
hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F));
hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method));
for (i = 0; i < max_num_coarse_levels; i++)
{
hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]);
hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]);
hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]);
hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]);
HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i];
hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points);
hypre_printf("Cpoints indices: ");
for (j = 0; j < lvl_num_coarse_points; j++)
{
if ((mgr_data -> block_cf_marker)[i][j] == 1)
{
hypre_printf("%d ", j);
}
}
hypre_printf("\n");
}
hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size));
hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints));
hypre_printf("\n MGR Solver Parameters: \n");
hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver));
if((mgr_data -> use_default_fsolver) >= 0)
{
hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver));
}
return hypre_error_flag;
}
#ifdef HYPRE_USING_DSUPERLU
void *
hypre_MGRDirectSolverCreate()
{
hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST);
return (void *) dslu_data;
}
HYPRE_Int
hypre_MGRDirectSolverSetup( void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
/* Par Data Structure variables */
HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_local;
HYPRE_Int num_rows;
HYPRE_Int num_procs, my_id;
HYPRE_Int pcols=1, prows=1;
HYPRE_BigInt *big_rowptr = NULL;
hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver;
HYPRE_Int info = 0;
HYPRE_Int nrhs = 0;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Merge diag and offd into one matrix (global ids) */
A_local = hypre_MergeDiagAndOffd(A);
num_rows = hypre_CSRMatrixNumRows(A_local);
/* Now convert hypre matrix to a SuperMatrix */
#ifdef HYPRE_MIXEDINT
{
HYPRE_Int *rowptr = NULL;
HYPRE_Int i;
rowptr = hypre_CSRMatrixI(A_local);
big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows+1), HYPRE_MEMORY_HOST);
for(i=0; i<(num_rows+1); i++)
{
big_rowptr[i] = (HYPRE_BigInt)rowptr[i];
}
}
#else
big_rowptr = hypre_CSRMatrixI(A_local);
#endif
dCreate_CompRowLoc_Matrix_dist(
&(dslu_data->A_dslu),global_num_rows,global_num_rows,
hypre_CSRMatrixNumNonzeros(A_local),
num_rows,
hypre_ParCSRMatrixFirstRowIndex(A),
hypre_CSRMatrixData(A_local),
hypre_CSRMatrixBigJ(A_local),big_rowptr,
SLU_NR_loc, SLU_D, SLU_GE);
/* DOK: SuperLU frees assigned data, so set them to null before
* calling hypre_CSRMatrixDestroy on A_local to avoid memory errors.
*/
#ifndef HYPRE_MIXEDINT
hypre_CSRMatrixI(A_local) = NULL;
#endif
hypre_CSRMatrixData(A_local) = NULL;
hypre_CSRMatrixBigJ(A_local) = NULL;
hypre_CSRMatrixDestroy(A_local);
/*Create process grid */
while (prows*pcols <= num_procs) ++prows;
--prows;
pcols = num_procs/prows;
while (prows*pcols != num_procs)
{
prows -= 1;
pcols = num_procs/prows;
}
//hypre_printf(" prows %d pcols %d\n", prows, pcols);
superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid));
set_default_options_dist(&(dslu_data->dslu_options));
dslu_data->dslu_options.Fact = DOFACT;
dslu_data->dslu_options.PrintStat = NO;
/*dslu_data->dslu_options.IterRefine = SLU_DOUBLE;
dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A;
dslu_data->dslu_options.DiagPivotThresh = 1.0;
dslu_data->dslu_options.ReplaceTinyPivot = NO; */
dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct));
dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU));
PStatInit(&(dslu_data->dslu_data_stat));
dslu_data->global_num_rows = global_num_rows;
dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
dslu_data->berr[0] = 0.0;
pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu),
&(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs,
&(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU),
&(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info);
dslu_data->dslu_options.Fact = FACTORED;
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRDirectSolverSolve( void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
hypre_SLUDistSolve(solver, f, u);
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
hypre_SLUDistDestroy(solver);
return hypre_error_flag;
}
#endif
|
mutex.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
int main()
{
omp_lock_t lock;
printf("%" PRIu64 ": &lock: %lli\n", ompt_get_thread_data()->value, &lock);
omp_init_lock(&lock);
omp_set_lock(&lock);
omp_unset_lock(&lock);
omp_destroy_lock(&lock);
omp_nest_lock_t nest_lock;
printf("%" PRIu64 ": &nest_lock: %lli\n", ompt_get_thread_data()->value, &nest_lock);
omp_init_nest_lock(&nest_lock);
omp_set_nest_lock(&nest_lock);
omp_set_nest_lock(&nest_lock);
omp_unset_nest_lock(&nest_lock);
omp_unset_nest_lock(&nest_lock);
omp_destroy_nest_lock(&nest_lock);
//print_retadd();
#pragma omp critical
{
print_ids(0);
}
int x = 3;
#pragma omp atomic
x++;
#pragma omp ordered
{
print_ids(0);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// TODO: check wait ids
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: &lock: [[WAIT_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_init_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_lock: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_lock: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_init_nest_lock: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_first: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_prev: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_last: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_critical: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_critical: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_critical: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// atomic cannot be tested because it is implemented with atomic hardware instructions
// disabled_CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_atomic: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// disabled_CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_atomic: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_ordered: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_ordered: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_ordered: wait_id=[[WAIT_ID]], return_address={{0x[0-f]+}}
return 0;
}
|
cross.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <ops/declarable/helpers/helpers.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
void FORCEINLINE _cross(NDArray<T> *a, NDArray<T> *b, NDArray<T> *o) {
auto a0 = a->getScalar(0);
auto a1 = a->getScalar(1);
auto a2 = a->getScalar(2);
auto b0 = b->getScalar(0);
auto b1 = b->getScalar(1);
auto b2 = b->getScalar(2);
o->putScalar(0, a1 * b2 - a2 * b1);
o->putScalar(1, a2 * b0 - a0 * b2);
o->putScalar(2, a0 * b1 - a1 * b0);
}
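// Quick sanity check for the formula above: for the standard basis vectors
// a = (1,0,0) and b = (0,1,0), the output is
// o = (a1*b2 - a2*b1, a2*b0 - a0*b2, a0*b1 - a1*b0) = (0, 0, 1),
// as expected for the right-handed cross product.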
template <typename T>
void FORCEINLINE _crossBatched(NDArray<T> *a, NDArray<T> *b, NDArray<T> *o) {
auto _a = a->reshape(a->ordering(), {-1, 3});
auto _b = b->reshape(b->ordering(), {-1, 3});
auto _o = o->reshape(o->ordering(), {-1, 3});
auto tadsA = _a->allTensorsAlongDimension({1});
auto tadsB = _b->allTensorsAlongDimension({1});
auto tadsO = _o->allTensorsAlongDimension({1});
int tads = tadsA->size();
#pragma omp parallel for simd schedule(static)
for (int e = 0; e < tads; e++) {
auto a_ = tadsA->at(e);
auto b_ = tadsB->at(e);
auto o_ = tadsO->at(e);
helpers::_cross(a_, b_, o_);
}
delete tadsA;
delete tadsB;
delete tadsO;
delete _a;
delete _b;
delete _o;
}
}
}
} |
plot.h | #ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H
#include <unordered_map>
#include <sstream>
#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"
#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
#include "openmc/random_lcg.h"
namespace openmc {
//===============================================================================
// Global variables
//===============================================================================
class Plot;
namespace model {
extern std::vector<Plot> plots; //!< Plot instance container
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter
extern int plotter_stream; // Stream index used by the plotter
} // namespace model
//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================
struct RGBColor {
//Constructors
RGBColor() : red(0), green(0), blue(0) { };
RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { };
RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { };
RGBColor(const std::vector<int> &v) {
if (v.size() != 3) {
throw std::out_of_range("Incorrect vector size for RGBColor.");
}
red = v[0];
green = v[1];
blue = v[2];
}
bool operator ==(const RGBColor& other) {
return red == other.red && green == other.green && blue == other.blue;
}
// Members
uint8_t red, green, blue;
};
// some default colors
const RGBColor WHITE {255, 255, 255};
const RGBColor RED {255, 0, 0};
typedef xt::xtensor<RGBColor, 2> ImageData;
struct IdData {
// Constructor
IdData(size_t h_res, size_t v_res);
// Methods
void set_value(size_t y, size_t x, const Particle& p, int level);
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};
struct PropertyData {
// Constructor
PropertyData(size_t h_res, size_t v_res);
// Methods
void set_value(size_t y, size_t x, const Particle& p, int level);
void set_overlap(size_t y, size_t x);
// Members
xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};
enum class PlotType {
slice = 1,
voxel = 2
};
enum class PlotBasis {
xy = 1,
xz = 2,
yz = 3
};
enum class PlotColorBy {
cells = 0,
mats = 1
};
//===============================================================================
// Plot class
//===============================================================================
class PlotBase {
public:
template<class T> T get_map() const;
// Members
public:
Position origin_; //!< Plot origin in geometry
Position width_; //!< Plot width in geometry
PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
std::array<size_t, 3> pixels_; //!< Plot size in pixels
bool color_overlaps_; //!< Show overlapping cells?
int level_; //!< Plot universe level
};
template<class T>
T PlotBase::get_map() const {
size_t width = pixels_[0];
size_t height = pixels_[1];
// get pixel size
double in_pixel = (width_[0])/static_cast<double>(width);
double out_pixel = (width_[1])/static_cast<double>(height);
// size data array
T data(width, height);
// setup basis indices and initial position centered on pixel
int in_i, out_i;
Position xyz = origin_;
switch(basis_) {
case PlotBasis::xy :
in_i = 0;
out_i = 1;
break;
case PlotBasis::xz :
in_i = 0;
out_i = 2;
break;
case PlotBasis::yz :
in_i = 1;
out_i = 2;
break;
#ifdef __GNUC__
default:
__builtin_unreachable();
#endif
}
// set initial position
xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;
// arbitrary direction
Direction dir = {0.7071, 0.7071, 0.0};
#pragma omp parallel
{
Particle p;
p.r() = xyz;
p.u() = dir;
p.coord_[0].universe = model::root_universe;
int level = level_;
int j{};
#pragma omp for
for (int y = 0; y < height; y++) {
p.r()[out_i] = xyz[out_i] - out_pixel * y;
for (int x = 0; x < width; x++) {
p.r()[in_i] = xyz[in_i] + in_pixel * x;
p.n_coord_ = 1;
// local variables
bool found_cell = find_cell(&p, 0);
j = p.n_coord_ - 1;
if (level >=0) {j = level + 1;}
if (found_cell) {
data.set_value(y, x, p, j);
}
if (color_overlaps_ && check_cell_overlap(&p, false)) {
data.set_overlap(y, x);
}
} // inner for
} // outer for
} // omp parallel
return data;
}
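// Worked example of the pixel mapping above (illustrative numbers): for an
// xy slice with origin (0,0,0), width_ = (10,10), and pixels_ = {100,100},
// in_pixel = out_pixel = 0.1, so pixel (x=0,y=0) is sampled at
// (-4.95, 4.95) and pixel (99,99) at (4.95, -4.95); each trace starts at
// the pixel center.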
class Plot : public PlotBase {
public:
// Constructor
Plot(pugi::xml_node plot);
// Methods
private:
void set_id(pugi::xml_node plot_node);
void set_type(pugi::xml_node plot_node);
void set_output_path(pugi::xml_node plot_node);
void set_bg_color(pugi::xml_node plot_node);
void set_basis(pugi::xml_node plot_node);
void set_origin(pugi::xml_node plot_node);
void set_width(pugi::xml_node plot_node);
void set_universe(pugi::xml_node plot_node);
void set_default_colors(pugi::xml_node plot_node);
void set_user_colors(pugi::xml_node plot_node);
void set_meshlines(pugi::xml_node plot_node);
void set_mask(pugi::xml_node plot_node);
void set_overlap_color(pugi::xml_node plot_node);
// Members
public:
int id_; //!< Plot ID
PlotType type_; //!< Plot type (Slice/Voxel)
PlotColorBy color_by_; //!< Plot coloring (cell/material)
int meshlines_width_; //!< Width of lines added to the plot
int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
RGBColor meshlines_color_; //!< Color of meshlines on the plot
RGBColor not_found_ {WHITE}; //!< Plot background color
RGBColor overlap_color_ {RED}; //!< Plot overlap color
std::vector<RGBColor> colors_; //!< Plot colors
std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================
//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);
//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);
//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
hid_t* dset, hid_t* memspace);
//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset,
hid_t memspace, void* buf);
//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);
//===============================================================================
// External functions
//===============================================================================
//! Read plot specifications from a plots.xml file
void read_plots_xml();
//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);
//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);
//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();
} // namespace openmc
#endif // OPENMC_PLOT_H
|
charge_deposition.h | #ifndef XFIELDS_CHARGE_DEPOSITION_H
#define XFIELDS_CHARGE_DEPOSITION_H
//include_file atomicadd.clh for_context opencl
//include_file atomicadd.h for_context cpu_serial cpu_openmp
/*gpukern*/ void p2m_rectmesh3d(
// INPUTS:
// length of x, y, z arrays
const int nparticles,
// particle positions
/*gpuglmem*/ const double* x,
/*gpuglmem*/ const double* y,
/*gpuglmem*/ const double* z,
// particle weights
/*gpuglmem*/ const double* part_weights,
// mesh origin
const double x0, const double y0, const double z0,
// mesh distances per cell
const double dx, const double dy, const double dz,
// mesh dimension (number of cells)
const int nx, const int ny, const int nz,
// OUTPUTS:
/*gpuglmem*/ double *grid1d
) {
double vol_m1 = 1/(dx*dy*dz);
#pragma omp parallel for //only_for_context cpu_openmp
for (int pidx=0; pidx<nparticles; pidx++){ //vectorize_over pidx nparticles
double pwei = part_weights[pidx];
// indices
int jx = floor((x[pidx] - x0) / dx);
int ix = floor((y[pidx] - y0) / dy);
int kx = floor((z[pidx] - z0) / dz);
// distances
double dxi = x[pidx] - (x0 + jx * dx);
double dyi = y[pidx] - (y0 + ix * dy);
double dzi = z[pidx] - (z0 + kx * dz);
// weights
double wijk = pwei * vol_m1 * (1.-dxi/dx) * (1.-dyi/dy) * (1.-dzi/dz);
double wi1jk = pwei * vol_m1 * (1.-dxi/dx) * (dyi/dy) * (1.-dzi/dz);
double wij1k = pwei * vol_m1 * (dxi/dx) * (1.-dyi/dy) * (1.-dzi/dz);
double wi1j1k = pwei * vol_m1 * (dxi/dx) * (dyi/dy) * (1.-dzi/dz);
double wijk1 = pwei * vol_m1 * (1.-dxi/dx) * (1.-dyi/dy) * (dzi/dz);
double wi1jk1 = pwei * vol_m1 * (1.-dxi/dx) * (dyi/dy) * (dzi/dz);
double wij1k1 = pwei * vol_m1 * (dxi/dx) * (1.-dyi/dy) * (dzi/dz);
double wi1j1k1 = pwei * vol_m1 * (dxi/dx) * (dyi/dy) * (dzi/dz);
if (jx >= 0 && jx < nx - 1 && ix >= 0 && ix < ny - 1
&& kx >= 0 && kx < nz - 1)
{
atomicAdd(&grid1d[jx + ix*nx + kx*nx*ny], wijk);
atomicAdd(&grid1d[jx+1 + ix*nx + kx*nx*ny], wij1k);
atomicAdd(&grid1d[jx + (ix+1)*nx + kx*nx*ny], wi1jk);
atomicAdd(&grid1d[jx+1 + (ix+1)*nx + kx*nx*ny], wi1j1k);
atomicAdd(&grid1d[jx + ix*nx + (kx+1)*nx*ny], wijk1);
atomicAdd(&grid1d[jx+1 + ix*nx + (kx+1)*nx*ny], wij1k1);
atomicAdd(&grid1d[jx + (ix+1)*nx + (kx+1)*nx*ny], wi1jk1);
atomicAdd(&grid1d[jx+1 + (ix+1)*nx + (kx+1)*nx*ny], wi1j1k1);
}
}//end_vectorize
}
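/* Sanity check for the trilinear weights above: the eight w* terms sum to
 * pwei * vol_m1 because the x, y, and z factors each telescope to 1, e.g.
 * (1 - dxi/dx) + (dxi/dx) = 1. A particle sitting exactly on node
 * (jx, ix, kx) has dxi = dyi = dzi = 0, so all of its weight goes into
 * wijk and the other seven weights vanish. */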
#endif
|
w8_e2_output.c | // Checking the output from an OpenMP program.
#include <stdio.h>
#include <omp.h>
int main(int argc, char const *argv[]) {
int total_sum = 0;
int i;
#pragma omp parallel default(shared) reduction(+:total_sum) num_threads(4)
{
int my_id = omp_get_thread_num();
int my_sum = 0;
#pragma omp for schedule(static,10)
for (i=1; i<=100; i++)
my_sum += i;
printf("From thread No.%d: my_sum=%d\n", my_id, my_sum);
total_sum += my_sum;
}
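// Expected result (assuming exactly 4 threads are granted): with
// schedule(static,10) the chunks are dealt round-robin, giving per-thread
// sums 1365, 1665, 910, and 1110 in some interleaved print order; the
// reduction always yields Total sum=5050.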
printf("Total sum=%d\n",total_sum);
return 0;
}
|
GB_unop__identity_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp64_fp32)
// op(A') function: GB (_unop_tran__identity_fp64_fp32)
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp64_fp32)
(
double *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
double z = (double) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp64_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
distributiongenerator.h | // @file distributiongenerator.h This code provides basic structure for
// distribution generators. This should be inherited by all other distribution
// generators.
// @author TPOC: contact@palisade-crypto.org
//
// @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution. THIS SOFTWARE IS
// PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#define LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
#include <chrono>
#include <memory>
#include <mutex>
#include <random>
#include <thread>
#include "math/backend.h"
#include "utils/prng/blake2engine.h"
// #define FIXED_SEED // if defined, then uses a fixed seed number for
// reproducible results during debug. Use only one OMP thread to ensure
// reproducibility
namespace lbcrypto {
// Defines the PRNG implementation used by PALISADE.
// The cryptographically secure PRNG used by PALISADE is based on BLAKE2 hash
// functions. A user can replace it with a different PRNG if desired by defining
// the same methods as for the Blake2Engine class.
typedef Blake2Engine PRNG;
/**
* @brief The class providing the PRNG capability to all random distribution
* generators in PALISADE. The security of Ring Learning With Errors (used for
* all crypto capabilities in PALISADE) depends on the randomness of uniform,
* ternary, and Gaussian distributions, which derive their randomness from the
* PRNG.
*/
class PseudoRandomNumberGenerator {
public:
/**
* @brief Initializes the PRNG engine; a per-thread engine is created for every OMP thread
*/
static void InitPRNG() {
int threads = PalisadeParallelControls.GetNumThreads();
if (threads == 0) {
threads = 1;
}
#pragma omp parallel for num_threads(threads)
for (int i = 0; i < threads; ++i) {
GetPRNG();
}
}
static PRNG &GetPRNG() {
// initialization of PRNGs
if (m_prng == nullptr) {
#pragma omp critical
{
#if defined(FIXED_SEED)
// Only used for debugging in the single-threaded mode.
std::cerr << "**FOR DEBUGGING ONLY!!!! Using fixed initializer for "
"PRNG. Use a single thread only, e.g., OMP_NUM_THREADS=1!"
<< std::endl;
std::array<uint32_t, 16> seed{};
seed[0] = 1;
m_prng = std::make_shared<PRNG>(seed);
#else
// A 512-bit seed is generated for each thread (this roughly corresponds
// to 256 bits of security). The seed is the sum of a random sample
// generated using std::random_device (typically works correctly in
// Linux, MacOS X, and MinGW starting with GCC 9.2) and a BLAKE2 sample
// seeded from current time stamp, a hash of the current thread, and a
// memory location of a heap variable. The BLAKE2 sample is added in
// case random_device is deterministic (happens on MinGW with GCC
// below 9.2). All future calls to PRNG use the seed generated here.
// The code below derives randomness from time, thread id, and a memory
// location of a heap variable. This seed is relevant only if the
// implementation of random_device is deterministic (as in older
// versions of GCC in MinGW)
std::array<uint32_t, 16> initKey{};
// high-resolution clock typically has a nanosecond tick period
// Arguably this may give up to 32 bits of entropy as the clock gets
// recycled every 4.3 seconds
initKey[0] = std::chrono::high_resolution_clock::now()
.time_since_epoch()
.count();
// A thread id is often close to being random (on most systems)
initKey[1] = std::hash<std::thread::id>{}(std::this_thread::get_id());
// On a 64-bit machine, the thread id is 64 bits long
// skip on 32-bit arm architectures
#ifndef __arm__
if (sizeof(size_t) == 8)
initKey[2] =
(std::hash<std::thread::id>{}(std::this_thread::get_id()) >> 32);
#endif
// heap variable; we are going to use the least 32 bits of its memory
// location as the counter for BLAKE2 This will increase the entropy of
// the BLAKE2 sample
void *mem = malloc(1);
free(mem);
uint32_t counter = reinterpret_cast<long long>(mem);
PRNG gen(initKey, counter);
std::uniform_int_distribution<uint32_t> distribution(0);
std::array<uint32_t, 16> seed{};
for (uint32_t i = 0; i < 16; i++) {
seed[i] = distribution(gen);
}
std::array<uint32_t, 16> rdseed{};
size_t attempts = 3;
bool rdGenPassed = false;
size_t idx = 0;
while (!rdGenPassed && idx < attempts) {
try {
std::random_device genR;
for (uint32_t i = 0; i < 16; i++) {
// we use the fact that there is no overflow for unsigned integers
// (from C++ standard) i.e., arithmetic mod 2^32 is performed. For
// the seed to be random, it is sufficient for one of the two
// samples below to be random. In almost all practical cases,
// distribution(genR) is random. We add distribution(gen) just in
// case there is an implementation issue with random_device (as in
// older MinGW systems).
rdseed[i] = distribution(genR);
}
rdGenPassed = true;
} catch (std::exception &e) {
}
idx++;
}
for (uint32_t i = 0; i < 16; i++) {
seed[i] += rdseed[i];
}
m_prng = std::make_shared<PRNG>(seed);
#endif
}
}
return *m_prng;
}
private:
// shared pointer to a thread-specific PRNG engine
static std::shared_ptr<PRNG> m_prng;
#if !defined(FIXED_SEED)
// avoid contention on m_prng
// local copies of m_prng are created for each thread
#pragma omp threadprivate(m_prng)
#endif
};
/**
* @brief Abstract class describing generator requirements.
*
* The Distribution Generator defines the methods that must be implemented by a
* real generator. It also holds the single PRNG, which should be used by all
* child classes whenever a random number is required.
*
*/
template <typename VecType>
class DistributionGenerator {
public:
DistributionGenerator() {}
virtual ~DistributionGenerator() {}
};
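// Usage sketch (illustrative only, not part of the PALISADE API): a concrete
// child generator would draw its randomness through the shared thread-local
// engine, e.g.
//   PRNG &prng = PseudoRandomNumberGenerator::GetPRNG();
//   std::uniform_int_distribution<uint32_t> dist(0, modulus - 1);
//   uint32_t sample = dist(prng);  // "modulus" is a hypothetical parameter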
} // namespace lbcrypto
#endif // LBCRYPTO_MATH_DISTRIBUTIONGENERATOR_H_
|
GB_unaryop__lnot_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_uint32
// op(A') function: GB_tran__lnot_bool_uint32
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
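// For example (expansion sketch), GB_CAST_OP (pC,pA) expands to:
//   uint32_t aij = Ax [pA] ;
//   bool z = (bool) aij ;
//   Cx [pC] = !z ;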
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_bool_uint32
(
bool *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_bool_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c | #include <stdio.h>
#define N 1024
#define TEST_SIMPLE_NW 1
#define TEST_LOOP 1
#define TEST_LOOP_NW 1
int a[N], b[N];
int aa[N], bb[N];
int main() {
int i;
int error, totError = 0;
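/* Three variants of the same device computation (a[j] = b[j]+1), each
   splitting the arrays into four quarters:
   TEST_SIMPLE_NW: asynchronous (nowait) data maps and target regions,
     synchronized with explicit taskwait;
   TEST_LOOP:      synchronous target regions issued from a parallel loop;
   TEST_LOOP_NW:   nowait target regions issued from a parallel loop. */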
#if TEST_SIMPLE_NW
for (i=0; i<N; i++) a[i] = b[i] = i;
// alloc, move to
#pragma omp target enter data nowait map(alloc: a[0:N/4]) map(to: b[0:N/4])
#pragma omp target enter data nowait map(alloc: a[N/4:N/4]) map(to: b[N/4:N/4])
#pragma omp target enter data nowait map(alloc: a[N/2:N/4]) map(to: b[N/2:N/4])
#pragma omp target enter data nowait map(alloc: a[3*(N/4):N/4]) map(to: b[3*(N/4):N/4])
#pragma omp taskwait
// compute
#pragma omp target nowait map(from: a[0:N/4]) map(to: b[0:N/4])
{
int j;
for(j=0; j<N/4; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(from: a[N/4:N/4]) map(to: b[N/4:N/4])
{
int j;
for(j=N/4; j<N/2; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(from: a[N/2:N/4]) map(to: b[N/2:N/4])
{
int j;
for(j=N/2; j<3*(N/4); j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(from: a[3*(N/4):N/4]) map(to: b[3*(N/4):N/4])
{
int j;
for(j=3*(N/4); j<N; j++) a[j] = b[j]+1;
}
#pragma omp taskwait
#pragma omp target exit data nowait map(from: a[0:N/4]) map(release: b[0:N/4])
#pragma omp target exit data nowait map(from: a[N/4:N/4]) map(release: b[N/4:N/4])
#pragma omp target exit data nowait map(from: a[N/2:N/4]) map(release: b[N/2:N/4])
#pragma omp target exit data nowait map(from: a[3*(N/4):N/4]) map(release: b[3*(N/4):N/4])
#pragma omp taskwait
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with simple nowait completed successfully\n");
} else {
printf(" test with simple nowait completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_LOOP
for (i=0; i<N; i++) a[i] = b[i] = i;
#pragma omp parallel for schedule(static, 1)
for(i=0; i<4; i++) {
int lb = i* N/4;
int ub = lb + N/4;
// alloc, move to
#pragma omp target enter data map(alloc: a[lb:N/4]) map(to: b[lb:N/4])
// compute
#pragma omp target map(from: a[lb:N/4]) map(to: b[lb:N/4])
{
int j;
for(j=lb; j<ub; j++) a[j] = b[j]+1;
}
#pragma omp target exit data map(from: a[lb:N/4]) map(release: b[lb:N/4])
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
if (! error) {
printf(" test with loop wait completed successfully\n");
} else {
printf(" test with loop wait completed with %d error(s)\n", error);
totError++;
}
#endif
#if TEST_LOOP_NW
for (i=0; i<N; i++) a[i] = b[i] = aa[i] = bb[i] = i;
#pragma omp parallel for schedule(static, 1)
for(i=0; i<4; i++) {
int lb = i* N/4;
int ub = lb + N/4;
// alloc, move to
#pragma omp target enter data nowait map(alloc: a[lb:N/4]) map(to: b[lb:N/4])
#pragma omp target enter data nowait map(alloc: aa[lb:N/4]) map(to: bb[lb:N/4])
// compute
#pragma omp taskwait
#pragma omp target nowait map(from: a[lb:N/4]) map(to: b[lb:N/4])
{
int j;
for(j=lb; j<ub; j++) a[j] = b[j]+1;
}
#pragma omp target nowait map(from: aa[lb:N/4]) map(to: bb[lb:N/4])
{
int j;
for(j=lb; j<ub; j++) aa[j] = bb[j]+1;
}
// get and release data
#pragma omp taskwait
#pragma omp target exit data nowait map(from: a[lb:N/4]) map(release: b[lb:N/4])
#pragma omp target exit data nowait map(from: aa[lb:N/4]) map(release: bb[lb:N/4])
}
error=0;
for (i=0; i<N; i++) {
if (a[i] != i+1) printf("%d: a error %d != %d, error %d\n", i, a[i], i+1, ++error);
}
for (i=0; i<N; i++) {
if (aa[i] != i+1) printf("%d: aa error %d != %d, error %d\n", i, aa[i], i+1, ++error);
}
if (! error) {
printf(" test with loop nowait completed successfully\n");
} else {
printf(" test with loop nowait completed with %d error(s)\n", error);
totError++;
}
#endif
printf("completed with %d errors\n", totError);
return totError;
}
|
annoylib_omp.h | // To use OpenMP, replace the contents of annoylib.h with the contents of this file
#ifndef ANNOYLIB_H
#define ANNOYLIB_H
#include <stdio.h>
#include <sys/stat.h>
#ifndef _MSC_VER
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stddef.h>
#include <omp.h>
#if defined(_MSC_VER) && _MSC_VER == 1500
typedef unsigned char uint8_t;
typedef signed __int32 int32_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
#if defined(_MSC_VER) || defined(__MINGW32__)
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include "mman.h"
#include <windows.h>
#else
#include <sys/mman.h>
#endif
#include <cerrno>
#include <string.h>
#include <math.h>
#include <vector>
#include <algorithm>
#include <queue>
#include <limits>
#ifdef _MSC_VER
// Needed for Visual Studio to disable runtime checks for memcpy
#pragma runtime_checks("s", off)
#endif
// This allows others to supply their own logger / error printer without
// requiring Annoy to import their headers. See RcppAnnoy for a use case.
#ifndef __ERROR_PRINTER_OVERRIDE__
#define showUpdate(...) { fprintf(stderr, __VA_ARGS__ ); }
#else
#define showUpdate(...) { __ERROR_PRINTER_OVERRIDE__( __VA_ARGS__ ); }
#endif
#ifndef _MSC_VER
#define popcount __builtin_popcountll
#else // See #293, #358
#define isnan(x) _isnan(x)
#define popcount cole_popcount
#endif
#if !defined(NO_MANUAL_VECTORIZATION) && defined(__GNUC__) && (__GNUC__ >6) && defined(__AVX512F__) // See #402
#pragma message "Using 512-bit AVX instructions"
#define USE_AVX512
#elif !defined(NO_MANUAL_VECTORIZATION) && defined(__AVX__) && defined (__SSE__) && defined(__SSE2__) && defined(__SSE3__)
#pragma message "Using 128-bit AVX instructions"
#define USE_AVX
#else
#pragma message "Using no AVX instructions"
#endif
#if defined(USE_AVX) || defined(USE_AVX512)
#if defined(_MSC_VER)
#include <intrin.h>
#elif defined(__GNUC__)
#include <x86intrin.h>
#endif
#endif
#ifndef ANNOY_NODE_ATTRIBUTE
#ifndef _MSC_VER
#define ANNOY_NODE_ATTRIBUTE __attribute__((__packed__))
// TODO: this is turned on by default, but may not work for all architectures! Need to investigate.
#else
#define ANNOY_NODE_ATTRIBUTE
#endif
#endif
using std::vector;
using std::pair;
using std::numeric_limits;
using std::make_pair;
inline void* remap_memory(void* _ptr, int _fd, size_t old_size, size_t new_size) {
#ifdef __linux__
_ptr = mremap(_ptr, old_size, new_size, MREMAP_MAYMOVE);
#else
munmap(_ptr, old_size);
#ifdef MAP_POPULATE
_ptr = mmap(_ptr, new_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, _fd, 0);
#else
_ptr = mmap(_ptr, new_size, PROT_READ | PROT_WRITE, MAP_SHARED, _fd, 0);
#endif
#endif
return _ptr;
}
namespace {
template<typename S, typename Node>
inline Node* get_node_ptr(const void* _nodes, const size_t _s, const S i) {
return (Node*)((uint8_t *)_nodes + (_s * i));
}
template<typename T>
inline T dot(const T* x, const T* y, int f) {
T s = 0;
for (int z = 0; z < f; z++) {
s += (*x) * (*y);
x++;
y++;
}
return s;
}
template<typename T>
inline T manhattan_distance(const T* x, const T* y, int f) {
T d = 0.0;
for (int i = 0; i < f; i++)
d += fabs(x[i] - y[i]);
return d;
}
template<typename T>
inline T euclidean_distance(const T* x, const T* y, int f) {
// Don't use dot-product: avoid catastrophic cancellation in #314.
T d = 0.0;
for (int i = 0; i < f; ++i) {
const T tmp=*x - *y;
d += tmp * tmp;
++x;
++y;
}
return d;
}
#ifdef USE_AVX
// Horizontal single sum of 256bit vector.
inline float hsum256_ps_avx(__m256 v) {
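// Reduce 8 lanes to 1: fold the upper 128-bit half onto the lower half,
// then 4 floats to 2 (movehl), then 2 to 1 (shuffle with 0x55).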
const __m128 x128 = _mm_add_ps(_mm256_extractf128_ps(v, 1), _mm256_castps256_ps128(v));
const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
return _mm_cvtss_f32(x32);
}
template<>
inline float dot<float>(const float* x, const float *y, int f) {
float result = 0;
if (f > 7) {
__m256 d = _mm256_setzero_ps();
for (; f > 7; f -= 8) {
d = _mm256_add_ps(d, _mm256_mul_ps(_mm256_loadu_ps(x), _mm256_loadu_ps(y)));
x += 8;
y += 8;
}
// Sum all floats in dot register.
result += hsum256_ps_avx(d);
}
// Don't forget the remaining values.
for (; f > 0; f--) {
result += *x * *y;
x++;
y++;
}
return result;
}
template<>
inline float manhattan_distance<float>(const float* x, const float* y, int f) {
float result = 0;
int i = f;
if (f > 7) {
__m256 manhattan = _mm256_setzero_ps();
__m256 minus_zero = _mm256_set1_ps(-0.0f);
for (; i > 7; i -= 8) {
const __m256 x_minus_y = _mm256_sub_ps(_mm256_loadu_ps(x), _mm256_loadu_ps(y));
const __m256 distance = _mm256_andnot_ps(minus_zero, x_minus_y); // Absolute value of x_minus_y (forces sign bit to zero)
manhattan = _mm256_add_ps(manhattan, distance);
x += 8;
y += 8;
}
// Sum all floats in manhattan register.
result = hsum256_ps_avx(manhattan);
}
// Don't forget the remaining values.
for (; i > 0; i--) {
result += fabsf(*x - *y);
x++;
y++;
}
return result;
}
template<>
inline float euclidean_distance<float>(const float* x, const float* y, int f) {
float result=0;
if (f > 7) {
__m256 d = _mm256_setzero_ps();
for (; f > 7; f -= 8) {
const __m256 diff = _mm256_sub_ps(_mm256_loadu_ps(x), _mm256_loadu_ps(y));
d = _mm256_add_ps(d, _mm256_mul_ps(diff, diff)); // no support for fmadd in AVX...
x += 8;
y += 8;
}
// Sum all floats in dot register.
result = hsum256_ps_avx(d);
}
// Don't forget the remaining values.
for (; f > 0; f--) {
float tmp = *x - *y;
result += tmp * tmp;
x++;
y++;
}
return result;
}
#endif
#ifdef USE_AVX512
template<>
inline float dot<float>(const float* x, const float *y, int f) {
float result = 0;
if (f > 15) {
__m512 d = _mm512_setzero_ps();
for (; f > 15; f -= 16) {
//AVX512F includes FMA
d = _mm512_fmadd_ps(_mm512_loadu_ps(x), _mm512_loadu_ps(y), d);
x += 16;
y += 16;
}
// Sum all floats in dot register.
result += _mm512_reduce_add_ps(d);
}
// Don't forget the remaining values.
for (; f > 0; f--) {
result += *x * *y;
x++;
y++;
}
return result;
}
template<>
inline float manhattan_distance<float>(const float* x, const float* y, int f) {
float result = 0;
int i = f;
if (f > 15) {
__m512 manhattan = _mm512_setzero_ps();
for (; i > 15; i -= 16) {
const __m512 x_minus_y = _mm512_sub_ps(_mm512_loadu_ps(x), _mm512_loadu_ps(y));
manhattan = _mm512_add_ps(manhattan, _mm512_abs_ps(x_minus_y));
x += 16;
y += 16;
}
// Sum all floats in manhattan register.
result = _mm512_reduce_add_ps(manhattan);
}
// Don't forget the remaining values.
for (; i > 0; i--) {
result += fabsf(*x - *y);
x++;
y++;
}
return result;
}
template<>
inline float euclidean_distance<float>(const float* x, const float* y, int f) {
float result=0;
if (f > 15) {
__m512 d = _mm512_setzero_ps();
for (; f > 15; f -= 16) {
const __m512 diff = _mm512_sub_ps(_mm512_loadu_ps(x), _mm512_loadu_ps(y));
d = _mm512_fmadd_ps(diff, diff, d);
x += 16;
y += 16;
}
// Sum all floats in dot register.
result = _mm512_reduce_add_ps(d);
}
// Don't forget the remaining values.
for (; f > 0; f--) {
float tmp = *x - *y;
result += tmp * tmp;
x++;
y++;
}
return result;
}
#endif
template<typename T>
inline T get_norm(T* v, int f) {
return sqrt(dot(v, v, f));
}
template<typename T, typename Random, typename Distance, typename Node>
inline void two_means(const vector<Node*>& nodes, int f, Random& random, bool cosine, Node* p, Node* q) {
/*
This algorithm is a huge heuristic. Empirically it works really well, but I
can't motivate it well. The basic idea is to keep two centroids and assign
points to either one of them. We weight each centroid by the number of points
assigned to it, so as to keep the two sides balanced.
*/
static int iteration_steps = 200;
size_t count = nodes.size();
size_t i = random.index(count);
size_t j = random.index(count-1);
j += (j >= i); // ensure that i != j
Distance::template copy_node<T, Node>(p, nodes[i], f);
Distance::template copy_node<T, Node>(q, nodes[j], f);
if (cosine) { Distance::template normalize<T, Node>(p, f); Distance::template normalize<T, Node>(q, f); }
Distance::init_node(p, f);
Distance::init_node(q, f);
int ic = 1, jc = 1;
for (int l = 0; l < iteration_steps; l++) {
size_t k = random.index(count);
T di = ic * Distance::distance(p, nodes[k], f),
dj = jc * Distance::distance(q, nodes[k], f);
T norm = cosine ? get_norm(nodes[k]->v, f) : 1.0;
if (!(norm > T(0))) {
continue;
}
if (di < dj) {
for (int z = 0; z < f; z++)
p->v[z] = (p->v[z] * ic + nodes[k]->v[z] / norm) / (ic + 1);
Distance::init_node(p, f);
ic++;
} else if (dj < di) {
for (int z = 0; z < f; z++)
q->v[z] = (q->v[z] * jc + nodes[k]->v[z] / norm) / (jc + 1);
Distance::init_node(q, f);
jc++;
}
}
}
} // namespace
struct Base {
template<typename T, typename S, typename Node>
static inline void preprocess(void* nodes, size_t _s, const S node_count, const int f) {
// Override this in specific metric structs below if you need to do any pre-processing
// on the entire set of nodes passed into this index.
}
template<typename Node>
static inline void zero_value(Node* dest) {
// Initialize any fields that require sane defaults within this node.
}
template<typename T, typename Node>
static inline void copy_node(Node* dest, const Node* source, const int f) {
memcpy(dest->v, source->v, f * sizeof(T));
}
template<typename T, typename Node>
static inline void normalize(Node* node, int f) {
T norm = get_norm(node->v, f);
if (norm > 0) {
for (int z = 0; z < f; z++)
node->v[z] /= norm;
}
}
};
struct Angular : Base {
template<typename S, typename T>
struct ANNOY_NODE_ATTRIBUTE Node {
/*
* We store a binary tree where each node has two things
* - A vector associated with it
* - Two children
* All nodes occupy the same amount of memory
* All nodes with n_descendants == 1 are leaf nodes.
* A memory optimization is that for nodes with 2 <= n_descendants <= K,
* we skip the vector. Instead we store a list of all descendants. K is
* determined by the number of items that fits in the space of the vector.
* For nodes with n_descendants == 1 the vector is a data point.
* For nodes with n_descendants > K the vector is the normal of the split plane.
* Note that we can't really do sizeof(node<T>) because we cheat and allocate
* more memory to be able to fit the vector outside
*/
S n_descendants;
union {
S children[2]; // Will possibly store more than 2
T norm;
};
T v[1]; // We let this one overflow intentionally. Need to allocate at least 1 to make GCC happy
};
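// Allocation sketch: each node occupies _s = offsetof(Node, v) + f*sizeof(T)
// bytes, so the one-element array v[] effectively holds f components (see
// the AnnoyIndex constructor below).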
template<typename S, typename T>
static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
// want to calculate (a/|a| - b/|b|)^2
// = a.a/|a|^2 + b.b/|b|^2 - 2 a.b/(|a| |b|)
// = 2 - 2 cos(a, b)
T pp = x->norm ? x->norm : dot(x->v, x->v, f); // For backwards compatibility reasons, we need to fall back and compute the norm here
T qq = y->norm ? y->norm : dot(y->v, y->v, f);
T pq = dot(x->v, y->v, f);
T ppqq = pp * qq;
if (ppqq > 0) return 2.0 - 2.0 * pq / sqrt(ppqq);
else return 2.0; // one of the norms is zero, so treat cos as 0
}
template<typename S, typename T>
static inline T margin(const Node<S, T>* n, const T* y, int f) {
return dot(n->v, y, f);
}
template<typename S, typename T, typename Random>
static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
T dot = margin(n, y, f);
if (dot != 0)
return (dot > 0);
else
return random.flip();
}
template<typename S, typename T, typename Random>
static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
two_means<T, Random, Angular, Node<S, T> >(nodes, f, random, true, p, q);
for (int z = 0; z < f; z++)
n->v[z] = p->v[z] - q->v[z];
Base::normalize<T, Node<S, T> >(n, f);
free(p);
free(q);
}
template<typename T>
static inline T normalized_distance(T distance) {
// Used when requesting distances from Python layer
// Turns out sometimes the squared distance is -0.0
// so we have to make sure it's a positive number.
return sqrt(std::max(distance, T(0)));
}
template<typename T>
static inline T pq_distance(T distance, T margin, int child_nr) {
if (child_nr == 0)
margin = -margin;
return std::min(distance, margin);
}
template<typename T>
static inline T pq_initial_value() {
return numeric_limits<T>::infinity();
}
template<typename S, typename T>
static inline void init_node(Node<S, T>* n, int f) {
n->norm = dot(n->v, n->v, f);
}
static const char* name() {
return "angular";
}
};
struct DotProduct : Angular {
template<typename S, typename T>
struct ANNOY_NODE_ATTRIBUTE Node {
/*
* This is an extension of the Angular node with an extra attribute for the scaled norm.
*/
S n_descendants;
S children[2]; // Will possibly store more than 2
T dot_factor;
T v[1]; // We let this one overflow intentionally. Need to allocate at least 1 to make GCC happy
};
static const char* name() {
return "dot";
}
template<typename S, typename T>
static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
return -dot(x->v, y->v, f);
}
template<typename Node>
static inline void zero_value(Node* dest) {
dest->dot_factor = 0;
}
template<typename S, typename T>
static inline void init_node(Node<S, T>* n, int f) {
}
template<typename T, typename Node>
static inline void copy_node(Node* dest, const Node* source, const int f) {
memcpy(dest->v, source->v, f * sizeof(T));
dest->dot_factor = source->dot_factor;
}
template<typename S, typename T, typename Random>
static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
DotProduct::zero_value(p);
DotProduct::zero_value(q);
two_means<T, Random, DotProduct, Node<S, T> >(nodes, f, random, true, p, q);
for (int z = 0; z < f; z++)
n->v[z] = p->v[z] - q->v[z];
n->dot_factor = p->dot_factor - q->dot_factor;
DotProduct::normalize<T, Node<S, T> >(n, f);
free(p);
free(q);
}
template<typename T, typename Node>
static inline void normalize(Node* node, int f) {
T norm = sqrt(dot(node->v, node->v, f) + pow(node->dot_factor, 2));
if (norm > 0) {
for (int z = 0; z < f; z++)
node->v[z] /= norm;
node->dot_factor /= norm;
}
}
template<typename S, typename T>
static inline T margin(const Node<S, T>* n, const T* y, int f) {
return dot(n->v, y, f) + (n->dot_factor * n->dot_factor);
}
template<typename S, typename T, typename Random>
static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
T dot = margin(n, y, f);
if (dot != 0)
return (dot > 0);
else
return random.flip();
}
template<typename T>
static inline T normalized_distance(T distance) {
return -distance;
}
template<typename T, typename S, typename Node>
static inline void preprocess(void* nodes, size_t _s, const S node_count, const int f) {
// This uses a method from Microsoft Research for transforming inner product spaces to cosine/angular-compatible spaces.
// (Bachrach et al., 2014, see https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf)
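// In short: every vector x is extended with an extra component
// sqrt(max_norm^2 - |x|^2), so all extended vectors have the same norm and
// maximizing the inner product becomes equivalent to minimizing the angular
// distance in the extended space.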
// Step one: compute the norm of each vector and store that in its extra dimension (f-1)
for (S i = 0; i < node_count; i++) {
Node* node = get_node_ptr<S, Node>(nodes, _s, i);
T norm = sqrt(dot(node->v, node->v, f));
if (isnan(norm)) norm = 0;
node->dot_factor = norm;
}
// Step two: find the maximum norm
T max_norm = 0;
for (S i = 0; i < node_count; i++) {
Node* node = get_node_ptr<S, Node>(nodes, _s, i);
if (node->dot_factor > max_norm) {
max_norm = node->dot_factor;
}
}
// Step three: set each vector's extra dimension to sqrt(max_norm^2 - norm^2)
for (S i = 0; i < node_count; i++) {
Node* node = get_node_ptr<S, Node>(nodes, _s, i);
T node_norm = node->dot_factor;
T dot_factor = sqrt(pow(max_norm, static_cast<T>(2.0)) - pow(node_norm, static_cast<T>(2.0)));
if (isnan(dot_factor)) dot_factor = 0;
node->dot_factor = dot_factor;
}
}
};
struct Hamming : Base {
template<typename S, typename T>
struct ANNOY_NODE_ATTRIBUTE Node {
S n_descendants;
S children[2];
T v[1];
};
static const size_t max_iterations = 20;
template<typename T>
static inline T pq_distance(T distance, T margin, int child_nr) {
return distance - (margin != (unsigned int) child_nr);
}
template<typename T>
static inline T pq_initial_value() {
return numeric_limits<T>::max();
}
template<typename T>
static inline int cole_popcount(T v) {
// Note: Only used with MSVC 9, which lacks intrinsics and fails to
// calculate std::bitset::count for v > 32bit. Uses the generalized
// approach by Eric Cole.
// See https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSet64
v = v - ((v >> 1) & (T)~(T)0/3);
v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
v = (v + (v >> 4)) & (T)~(T)0/255*15;
return (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * 8;
}
template<typename S, typename T>
static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
size_t dist = 0;
for (int i = 0; i < f; i++) {
dist += popcount(x->v[i] ^ y->v[i]);
}
return dist;
}
template<typename S, typename T>
static inline bool margin(const Node<S, T>* n, const T* y, int f) {
static const size_t n_bits = sizeof(T) * 8;
T chunk = n->v[0] / n_bits;
return (y[chunk] & (static_cast<T>(1) << (n_bits - 1 - (n->v[0] % n_bits)))) != 0;
}
template<typename S, typename T, typename Random>
static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
return margin(n, y, f);
}
template<typename S, typename T, typename Random>
static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
size_t cur_size = 0;
size_t i = 0;
int dim = f * 8 * sizeof(T);
for (; i < max_iterations; i++) {
// choose random position to split at
n->v[0] = random.index(dim);
cur_size = 0;
for (typename vector<Node<S, T>*>::const_iterator it = nodes.begin(); it != nodes.end(); ++it) {
if (margin(n, (*it)->v, f)) {
cur_size++;
}
}
if (cur_size > 0 && cur_size < nodes.size()) {
break;
}
}
// brute-force search for splitting coordinate
if (i == max_iterations) {
int j = 0;
for (; j < dim; j++) {
n->v[0] = j;
cur_size = 0;
for (typename vector<Node<S, T>*>::const_iterator it = nodes.begin(); it != nodes.end(); ++it) {
if (margin(n, (*it)->v, f)) {
cur_size++;
}
}
if (cur_size > 0 && cur_size < nodes.size()) {
break;
}
}
}
}
template<typename T>
static inline T normalized_distance(T distance) {
return distance;
}
template<typename S, typename T>
static inline void init_node(Node<S, T>* n, int f) {
}
static const char* name() {
return "hamming";
}
};
struct Minkowski : Base {
template<typename S, typename T>
struct ANNOY_NODE_ATTRIBUTE Node {
S n_descendants;
T a; // need an extra constant term to determine the offset of the plane
S children[2];
T v[1];
};
template<typename S, typename T>
static inline T margin(const Node<S, T>* n, const T* y, int f) {
return n->a + dot(n->v, y, f);
}
template<typename S, typename T, typename Random>
static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
T dot = margin(n, y, f);
if (dot != 0)
return (dot > 0);
else
return random.flip();
}
template<typename T>
static inline T pq_distance(T distance, T margin, int child_nr) {
if (child_nr == 0)
margin = -margin;
return std::min(distance, margin);
}
template<typename T>
static inline T pq_initial_value() {
return numeric_limits<T>::infinity();
}
};
struct Euclidean : Minkowski {
template<typename S, typename T>
static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
return euclidean_distance(x->v, y->v, f);
}
template<typename S, typename T, typename Random>
static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
two_means<T, Random, Euclidean, Node<S, T> >(nodes, f, random, false, p, q);
for (int z = 0; z < f; z++)
n->v[z] = p->v[z] - q->v[z];
Base::normalize<T, Node<S, T> >(n, f);
n->a = 0.0;
for (int z = 0; z < f; z++)
n->a += -n->v[z] * (p->v[z] + q->v[z]) / 2;
free(p);
free(q);
}
template<typename T>
static inline T normalized_distance(T distance) {
return sqrt(std::max(distance, T(0)));
}
template<typename S, typename T>
static inline void init_node(Node<S, T>* n, int f) {
}
static const char* name() {
return "euclidean";
}
};
struct Manhattan : Minkowski {
template<typename S, typename T>
static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
return manhattan_distance(x->v, y->v, f);
}
template<typename S, typename T, typename Random>
static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
two_means<T, Random, Manhattan, Node<S, T> >(nodes, f, random, false, p, q);
for (int z = 0; z < f; z++)
n->v[z] = p->v[z] - q->v[z];
Base::normalize<T, Node<S, T> >(n, f);
n->a = 0.0;
for (int z = 0; z < f; z++)
n->a += -n->v[z] * (p->v[z] + q->v[z]) / 2;
free(p);
free(q);
}
template<typename T>
static inline T normalized_distance(T distance) {
return std::max(distance, T(0));
}
template<typename S, typename T>
static inline void init_node(Node<S, T>* n, int f) {
}
static const char* name() {
return "manhattan";
}
};
template<typename S, typename T>
class AnnoyIndexInterface {
public:
virtual ~AnnoyIndexInterface() {};
virtual bool add_item(S item, const T* w, char** error=NULL) = 0;
virtual bool build(int q, char** error=NULL) = 0;
virtual bool unbuild(char** error=NULL) = 0;
virtual bool save(const char* filename, bool prefault=false, char** error=NULL) = 0;
virtual void unload() = 0;
virtual bool load(const char* filename, bool prefault=false, char** error=NULL) = 0;
virtual T get_distance(S i, S j) const = 0;
virtual void get_nns_by_item(S item, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const = 0;
virtual void get_nns_by_vector(const T* w, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const = 0;
virtual S get_n_items() const = 0;
virtual S get_n_trees() const = 0;
virtual void verbose(bool v) = 0;
virtual void get_item(S item, T* v) const = 0;
virtual void set_seed(int q) = 0;
virtual bool on_disk_build(const char* filename, char** error=NULL) = 0;
};
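// Usage sketch (illustrative; assumes a Random type such as Kiss32Random from
// kissrandom.h, which is not defined in this header):
//   AnnoyIndex<int, float, Angular, Kiss32Random> index(40); // 40-dim items
//   index.add_item(0, vec0);   // vec0: const float* with 40 components
//   index.add_item(1, vec1);
//   index.build(10);           // build a forest of 10 trees
//   std::vector<int> result;
//   index.get_nns_by_item(0, 5, (size_t)-1, &result, NULL); // 5 neighbours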
template<typename S, typename T, typename Distance, typename Random>
class AnnoyIndex : public AnnoyIndexInterface<S, T> {
/*
* We use random projection to build a forest of binary trees of all items.
* Basically just split the hyperspace into two sides by a hyperplane,
* then recursively split each of those subtrees etc.
* We create a tree like this q times. The default q is determined automatically
* in such a way that we at most use 2x as much memory as the vectors take.
*/
public:
typedef Distance D;
typedef typename D::template Node<S, T> Node;
protected:
const int _f;
size_t _s;
S _n_items;
Random _random;
void* _nodes; // Could either be mmapped, or point to a memory buffer that we reallocate
S _n_nodes;
S _nodes_size;
vector<S> _roots;
S _K;
bool _loaded;
bool _verbose;
int _fd;
bool _on_disk;
bool _built;
public:
AnnoyIndex(int f) : _f(f), _random() {
_s = offsetof(Node, v) + _f * sizeof(T); // Size of each node
_verbose = false;
_built = false;
_K = (S) (((size_t) (_s - offsetof(Node, children))) / sizeof(S)); // Max number of descendants to fit into node
reinitialize(); // Reset everything
}
~AnnoyIndex() {
unload();
}
int get_f() const {
return _f;
}
bool add_item(S item, const T* w, char** error=NULL) {
return add_item_impl(item, w, error);
}
template<typename W>
bool add_item_impl(S item, const W& w, char** error=NULL) {
if (_loaded) {
showUpdate("You can't add an item to a loaded index\n");
if (error) *error = (char *)"You can't add an item to a loaded index";
return false;
}
_allocate_size(item + 1);
Node* n = _get(item);
D::zero_value(n);
n->children[0] = 0;
n->children[1] = 0;
n->n_descendants = 1;
for (int z = 0; z < _f; z++)
n->v[z] = w[z];
D::init_node(n, _f);
if (item >= _n_items)
_n_items = item + 1;
return true;
}
bool on_disk_build(const char* file, char** error=NULL) {
_on_disk = true;
_fd = open(file, O_RDWR | O_CREAT | O_TRUNC, (int) 0600);
if (_fd == -1) {
showUpdate("Error: file descriptor is -1\n");
if (error) *error = strerror(errno);
_fd = 0;
return false;
}
_nodes_size = 1;
if (ftruncate(_fd, _s * _nodes_size) == -1) {
showUpdate("Error truncating file: %s\n", strerror(errno));
if (error) *error = strerror(errno);
return false;
}
#ifdef MAP_POPULATE
_nodes = (Node*) mmap(0, _s * _nodes_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, _fd, 0);
#else
_nodes = (Node*) mmap(0, _s * _nodes_size, PROT_READ | PROT_WRITE, MAP_SHARED, _fd, 0);
#endif
return true;
}
struct ThreadNodes {
ThreadNodes() :
_n_nodes(0),
_nodes_size(0),
_nodes(NULL)
{}
// actual num nodes added so far
S _n_nodes;
// buffer of num nodes created
S _nodes_size;
// nodes buffer
void* _nodes;
// roots in this thread
vector<S> _roots;
};
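// Each OpenMP thread builds its trees into a private ThreadNodes buffer
// using thread-local node indices; build() then concatenates the buffers
// into _nodes and rebases every child and root index by the running total
// of nodes already placed.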
bool build(int q, char** error=NULL) {
if (_loaded) {
showUpdate("You can't build a loaded index\n");
if (error) *error = (char *)"You can't build a loaded index";
return false;
}
if (_built) {
showUpdate("You can't build a built index\n");
if (error) *error = (char *)"You can't build a built index";
return false;
}
D::template preprocess<T, S, Node>(_nodes, _s, _n_items, _f);
_n_nodes = _n_items;
if (q != -1) {
int num_threads = 16;
vector<ThreadNodes> threads_buffer(num_threads);
#pragma omp parallel for num_threads(num_threads)
for (int tree = 0; tree < q; tree++) {
int thread_id = omp_get_thread_num();
ThreadNodes& thread_nodes = threads_buffer[thread_id];
if (_verbose) showUpdate("pass %zd...\n", tree);
//showUpdate("pass %zd with parallel for...\n", tree);
vector<S> indices;
for (S i = 0; i < _n_items; i++) {
if (_get(i)->n_descendants >= 1) // Issue #223
indices.push_back(i);
}
S root = _make_tree(indices, true, thread_nodes);
thread_nodes._roots.push_back(root);
}
// combine the results and recalculate the node index
{
int n_all_nodes = _n_nodes;
for (int i = 0; i < threads_buffer.size(); i++) {
ThreadNodes& thread_nodes = threads_buffer[i];
for (int j = 0; j < thread_nodes._n_nodes; j++) {
Node* node = (Node*)((char*)thread_nodes._nodes + _s * j);
// leaf node
if (node->n_descendants <= _K) {
for (int k = 0; k < node->n_descendants; k++) {
// add current n_all_nodes to indices as we will be combining buffers into one
node->children[k] += n_all_nodes;
}
} else { // splitting node
// add current n_all_nodes to indices as we will be combining buffers into one
node->children[0] += n_all_nodes;
node->children[1] += n_all_nodes;
}
}
for (int j = 0; j < thread_nodes._roots.size(); j++) {
// add current n_all_nodes to indices as we will be combining buffers into one
thread_nodes._roots[j] += n_all_nodes;
_roots.push_back(thread_nodes._roots[j]);
}
n_all_nodes += thread_nodes._n_nodes;
}
// allocate the one combined buffer
_allocate_size(n_all_nodes);
// copy from thread buffer into combined buffer
int bytes_offset = _s * _n_nodes;
for (int i = 0; i < threads_buffer.size(); i++) {
ThreadNodes& thread_nodes = threads_buffer[i];
int thread_bytes = thread_nodes._n_nodes * _s;
memcpy((char*)_nodes + bytes_offset, thread_nodes._nodes, thread_bytes);
bytes_offset += thread_bytes;
}
_n_nodes = n_all_nodes;
}
} else {
while (_n_nodes < _n_items * 2) {
if (_verbose) showUpdate("pass %zd...\n", _roots.size());
//showUpdate("pass %zd...\n", _roots.size());
vector<S> indices;
for (S i = 0; i < _n_items; i++) {
if (_get(i)->n_descendants >= 1) // Issue #223
indices.push_back(i);
}
_roots.push_back(_make_tree(indices, true));
}
}
// Also, copy the roots into the last segment of the array
// This way we can load them faster without reading the whole file
_allocate_size(_n_nodes + (S)_roots.size());
for (size_t i = 0; i < _roots.size(); i++)
memcpy(_get(_n_nodes + (S)i), _get(_roots[i]), _s);
_n_nodes += _roots.size();
if (_verbose) showUpdate("has %d nodes\n", _n_nodes);
if (_on_disk) {
_nodes = remap_memory(_nodes, _fd, _s * _nodes_size, _s * _n_nodes);
if (ftruncate(_fd, _s * _n_nodes)) {
// TODO: this probably creates an index in a corrupt state... not sure what to do
showUpdate("Error truncating file: %s\n", strerror(errno));
if (error) *error = strerror(errno);
return false;
}
_nodes_size = _n_nodes;
}
_built = true;
return true;
}
bool unbuild(char** error=NULL) {
if (_loaded) {
showUpdate("You can't unbuild a loaded index\n");
if (error) *error = (char *)"You can't unbuild a loaded index";
return false;
}
_roots.clear();
_n_nodes = _n_items;
_built = false;
return true;
}
bool save(const char* filename, bool prefault=false, char** error=NULL) {
if (!_built) {
showUpdate("You can't save an index that hasn't been built\n");
if (error) *error = (char *)"You can't save an index that hasn't been built";
return false;
}
if (_on_disk) {
return true;
} else {
// Delete file if it already exists (See issue #335)
unlink(filename);
printf("path: %s\n", filename);
FILE *f = fopen(filename, "wb");
if (f == NULL) {
showUpdate("Unable to open: %s\n", strerror(errno));
if (error) *error = strerror(errno);
return false;
}
if (fwrite(_nodes, _s, _n_nodes, f) != (size_t) _n_nodes) {
showUpdate("Unable to write: %s\n", strerror(errno));
if (error) *error = strerror(errno);
return false;
}
if (fclose(f) == EOF) {
showUpdate("Unable to close: %s\n", strerror(errno));
if (error) *error = strerror(errno);
return false;
}
unload();
return load(filename, prefault, error);
}
}
void reinitialize() {
_fd = 0;
_nodes = NULL;
_loaded = false;
_n_items = 0;
_n_nodes = 0;
_nodes_size = 0;
_on_disk = false;
_roots.clear();
}
void unload() {
if (_on_disk && _fd) {
close(_fd);
munmap(_nodes, _s * _nodes_size);
} else {
if (_fd) {
// we have mmapped data
close(_fd);
munmap(_nodes, _n_nodes * _s);
} else if (_nodes) {
// We have heap allocated data
free(_nodes);
}
}
reinitialize();
if (_verbose) showUpdate("unloaded\n");
}
bool load(const char* filename, bool prefault=false, char** error=NULL) {
_fd = open(filename, O_RDONLY, (int)0400);
if (_fd == -1) {
showUpdate("Error: file descriptor is -1\n");
if (error) *error = strerror(errno);
_fd = 0;
return false;
}
off_t size = lseek(_fd, 0, SEEK_END);
if (size == -1) {
showUpdate("lseek returned -1\n");
if (error) *error = strerror(errno);
return false;
} else if (size == 0) {
showUpdate("Size of file is zero\n");
if (error) *error = (char *)"Size of file is zero";
return false;
} else if (size % _s) {
// Something is fishy with this index!
showUpdate("Error: index size %zu is not a multiple of vector size %zu\n", (size_t)size, _s);
if (error) *error = (char *)"Index size is not a multiple of vector size";
return false;
}
int flags = MAP_SHARED;
if (prefault) {
#ifdef MAP_POPULATE
flags |= MAP_POPULATE;
#else
showUpdate("prefault is set to true, but MAP_POPULATE is not defined on this platform");
#endif
}
_nodes = (Node*)mmap(0, size, PROT_READ, flags, _fd, 0);
_n_nodes = (S)(size / _s);
// Find the roots by scanning the end of the file and taking the nodes with most descendants
_roots.clear();
S m = -1;
for (S i = _n_nodes - 1; i >= 0; i--) {
S k = _get(i)->n_descendants;
if (m == -1 || k == m) {
_roots.push_back(i);
m = k;
} else {
break;
}
}
// hacky fix: since the last root precedes the copy of all roots, delete it
if (_roots.size() > 1 && _get(_roots.front())->children[0] == _get(_roots.back())->children[0])
_roots.pop_back();
_loaded = true;
_built = true;
_n_items = m;
if (_verbose) showUpdate("found %lu roots with degree %d\n", _roots.size(), m);
return true;
}
T get_distance(S i, S j) const {
return D::normalized_distance(D::distance(_get(i), _get(j), _f));
}
void get_nns_by_item(S item, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const {
// TODO: handle OOB
const Node* m = _get(item);
_get_all_nns(m->v, n, search_k, result, distances);
}
void get_nns_by_vector(const T* w, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const {
_get_all_nns(w, n, search_k, result, distances);
}
S get_n_items() const {
return _n_items;
}
S get_n_trees() const {
return _roots.size();
}
void verbose(bool v) {
_verbose = v;
}
void get_item(S item, T* v) const {
// TODO: handle OOB
Node* m = _get(item);
memcpy(v, m->v, (_f) * sizeof(T));
}
void set_seed(int seed) {
_random.set_seed(seed);
}
protected:
void _allocate_size(S n) {
if (n > _nodes_size) {
const double reallocation_factor = 1.3;
S new_nodes_size = std::max(n, (S) ((_nodes_size + 1) * reallocation_factor));
void *old = _nodes;
if (_on_disk) {
int rc = ftruncate(_fd, _s * new_nodes_size);
if (_verbose && rc) showUpdate("File truncation error\n");
_nodes = remap_memory(_nodes, _fd, _s * _nodes_size, _s * new_nodes_size);
} else {
_nodes = realloc(_nodes, _s * new_nodes_size);
memset((char *) _nodes + (_nodes_size * _s) / sizeof(char), 0, (new_nodes_size - _nodes_size) * _s);
}
_nodes_size = new_nodes_size;
if (_verbose) showUpdate("Reallocating to %d nodes: old_address=%p, new_address=%p\n", new_nodes_size, old, _nodes);
}
}
void _allocate_size(S n, ThreadNodes& thread_nodes) {
if (n > thread_nodes._nodes_size) {
const double reallocation_factor = 1.3;
S new_nodes_size = std::max(n, (S) ((thread_nodes._nodes_size + 1) * reallocation_factor));
void *old = thread_nodes._nodes;
// Don't support on disk for multi thread tree building now
// if (_on_disk) {
// int rc = ftruncate(_fd, _s * new_nodes_size);
// if (_verbose && rc) showUpdate("File truncation error\n");
// _nodes = remap_memory(_nodes, _fd, _s * _nodes_size, _s * new_nodes_size);
// } else {
thread_nodes._nodes = realloc(thread_nodes._nodes, _s * new_nodes_size);
memset((char *) thread_nodes._nodes + (thread_nodes._nodes_size * _s) / sizeof(char),
0,
(new_nodes_size - thread_nodes._nodes_size) * _s);
//}
thread_nodes._nodes_size = new_nodes_size;
if (_verbose) showUpdate("Reallocating to %d nodes: old_address=%p, new_address=%p\n", new_nodes_size, old, thread_nodes._nodes);
}
}
inline Node* _get(const S i) const {
return get_node_ptr<S, Node>(_nodes, _s, i);
}
inline Node* _get(const S i, ThreadNodes& thread_nodes) const {
return get_node_ptr<S, Node>(thread_nodes._nodes, _s, i);
}
S _make_tree(const vector<S >& indices, bool is_root, ThreadNodes& thread_nodes) {
// The basic rule is that if we have <= _K items, then it's a leaf node, otherwise it's a split node.
// There are some regrettable complications caused by the fact that root nodes have to be "special":
// 1. We identify root nodes by the arguable logic that _n_items == n->n_descendants, regardless of how many descendants they actually have
// 2. Root nodes with only 1 child need to be a "dummy" parent
// 3. Due to the _n_items "hack", we need to be careful with the cases where _n_items <= _K or _n_items > _K
if (indices.size() == 1 && !is_root)
return indices[0];
if (indices.size() <= (size_t)_K && (!is_root || (size_t)_n_items <= (size_t)_K || indices.size() == 1)) {
_allocate_size(thread_nodes._n_nodes + 1, thread_nodes);
S item = thread_nodes._n_nodes++;
Node* m = _get(item, thread_nodes);
m->n_descendants = is_root ? _n_items : (S)indices.size();
// Using std::copy instead of a loop seems to resolve issues #3 and #13,
// probably because gcc 4.8 goes overboard with optimizations.
// Using memcpy instead of std::copy for MSVC compatibility. #235
// Only copy when necessary to avoid crash in MSVC 9. #293
if (!indices.empty())
memcpy(m->children, &indices[0], indices.size() * sizeof(S));
return item;
}
vector<Node*> children;
for (size_t i = 0; i < indices.size(); i++) {
S j = indices[i];
Node* n = _get(j);
if (n)
children.push_back(n);
}
Node* m = (Node*)malloc(_s); // TODO: avoid
D::create_split(children, _f, _s, _random, m);
vector<S> children_indices[2];
for (size_t i = 0; i < indices.size(); i++) {
S j = indices[i];
Node* n = _get(j);
if (n) {
bool side = D::side(m, n->v, _f, _random);
children_indices[side].push_back(j);
} else {
showUpdate("No node for index %d?\n", j);
}
}
// If we didn't find a hyperplane, just randomize sides as a last option
while (children_indices[0].size() == 0 || children_indices[1].size() == 0) {
if (_verbose)
showUpdate("\tNo hyperplane found (left has %ld children, right has %ld children)\n",
children_indices[0].size(), children_indices[1].size());
if (_verbose && indices.size() > 100000)
showUpdate("Failed splitting %lu items\n", indices.size());
children_indices[0].clear();
children_indices[1].clear();
// Set the vector to 0.0
for (int z = 0; z < _f; z++)
m->v[z] = 0.0;
for (size_t i = 0; i < indices.size(); i++) {
S j = indices[i];
// Just randomize...
children_indices[_random.flip()].push_back(j);
}
}
int flip = (children_indices[0].size() > children_indices[1].size());
m->n_descendants = is_root ? _n_items : (S)indices.size();
for (int side = 0; side < 2; side++) {
// run _make_tree for the smallest child first (for cache locality)
m->children[side^flip] = _make_tree(children_indices[side^flip], false, thread_nodes);
}
_allocate_size(thread_nodes._n_nodes + 1, thread_nodes);
S item = thread_nodes._n_nodes++;
memcpy(_get(item, thread_nodes), m, _s);
free(m);
return item;
}
S _make_tree(const vector<S >& indices, bool is_root) {
// The basic rule is that if we have <= _K items, then it's a leaf node, otherwise it's a split node.
// There are some regrettable complications caused by the fact that root nodes have to be "special":
// 1. We identify root nodes by the arguable logic that _n_items == n->n_descendants, regardless of how many descendants they actually have
// 2. Root nodes with only 1 child need to be a "dummy" parent
// 3. Due to the _n_items "hack", we need to be careful with the cases where _n_items <= _K or _n_items > _K
if (indices.size() == 1 && !is_root)
return indices[0];
if (indices.size() <= (size_t)_K && (!is_root || (size_t)_n_items <= (size_t)_K || indices.size() == 1)) {
_allocate_size(_n_nodes + 1);
S item = _n_nodes++;
Node* m = _get(item);
m->n_descendants = is_root ? _n_items : (S)indices.size();
// Using std::copy instead of a loop seems to resolve issues #3 and #13,
// probably because gcc 4.8 goes overboard with optimizations.
// Using memcpy instead of std::copy for MSVC compatibility. #235
// Only copy when necessary to avoid crash in MSVC 9. #293
if (!indices.empty())
memcpy(m->children, &indices[0], indices.size() * sizeof(S));
return item;
}
vector<Node*> children;
for (size_t i = 0; i < indices.size(); i++) {
S j = indices[i];
Node* n = _get(j);
if (n)
children.push_back(n);
}
vector<S> children_indices[2];
Node* m = (Node*)malloc(_s); // TODO: avoid
D::create_split(children, _f, _s, _random, m);
for (size_t i = 0; i < indices.size(); i++) {
S j = indices[i];
Node* n = _get(j);
if (n) {
bool side = D::side(m, n->v, _f, _random);
children_indices[side].push_back(j);
} else {
showUpdate("No node for index %d?\n", j);
}
}
// If we didn't find a hyperplane, just randomize sides as a last option
while (children_indices[0].size() == 0 || children_indices[1].size() == 0) {
if (_verbose)
showUpdate("\tNo hyperplane found (left has %ld children, right has %ld children)\n",
children_indices[0].size(), children_indices[1].size());
if (_verbose && indices.size() > 100000)
showUpdate("Failed splitting %lu items\n", indices.size());
children_indices[0].clear();
children_indices[1].clear();
// Set the vector to 0.0
for (int z = 0; z < _f; z++)
m->v[z] = 0.0;
for (size_t i = 0; i < indices.size(); i++) {
S j = indices[i];
// Just randomize...
children_indices[_random.flip()].push_back(j);
}
}
int flip = (children_indices[0].size() > children_indices[1].size());
m->n_descendants = is_root ? _n_items : (S)indices.size();
for (int side = 0; side < 2; side++) {
// run _make_tree for the smallest child first (for cache locality)
m->children[side^flip] = _make_tree(children_indices[side^flip], false);
}
_allocate_size(_n_nodes + 1);
S item = _n_nodes++;
memcpy(_get(item), m, _s);
free(m);
return item;
}
void _get_all_nns(const T* v, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const {
Node* v_node = (Node *)malloc(_s); // TODO: avoid
D::template zero_value<Node>(v_node);
memcpy(v_node->v, v, sizeof(T) * _f);
D::init_node(v_node, _f);
std::priority_queue<pair<T, S> > q;
if (search_k == (size_t)-1) {
search_k = n * _roots.size();
}
for (size_t i = 0; i < _roots.size(); i++) {
q.push(make_pair(Distance::template pq_initial_value<T>(), _roots[i]));
}
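// Best-first traversal: repeatedly pop the most promising node; leaves
// contribute candidate items, and split nodes push both children with
// priorities penalized by the hyperplane margin.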
std::vector<S> nns;
while (nns.size() < search_k && !q.empty()) {
const pair<T, S>& top = q.top();
T d = top.first;
S i = top.second;
Node* nd = _get(i);
q.pop();
if (nd->n_descendants == 1 && i < _n_items) {
nns.push_back(i);
} else if (nd->n_descendants <= _K) {
const S* dst = nd->children;
nns.insert(nns.end(), dst, &dst[nd->n_descendants]);
} else {
T margin = D::margin(nd, v, _f);
q.push(make_pair(D::pq_distance(d, margin, 1), static_cast<S>(nd->children[1])));
q.push(make_pair(D::pq_distance(d, margin, 0), static_cast<S>(nd->children[0])));
}
}
// Get distances for all items
// To avoid calculating distance multiple times for any items, sort by id
std::sort(nns.begin(), nns.end());
vector<pair<T, S> > nns_dist;
S last = -1;
for (size_t i = 0; i < nns.size(); i++) {
S j = nns[i];
if (j == last)
continue;
last = j;
if (_get(j)->n_descendants == 1) // This is only to guard a really obscure case, #284
nns_dist.push_back(make_pair(D::distance(v_node, _get(j), _f), j));
}
size_t m = nns_dist.size();
size_t p = n < m ? n : m; // Return this many items
std::partial_sort(nns_dist.begin(), nns_dist.begin() + p, nns_dist.end());
for (size_t i = 0; i < p; i++) {
if (distances)
distances->push_back(D::normalized_distance(nns_dist[i].first));
result->push_back(nns_dist[i].second);
}
free(v_node);
}
};
#endif
// vim: tabstop=2 shiftwidth=2
|
GB_binop__bor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint16)
// A*D function (colscale): GB (_AxD__bor_uint16)
// D*A function (rowscale): GB (_DxB__bor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint16)
// C=scalar+B GB (_bind1st__bor_uint16)
// C=scalar+B' GB (_bind1st_tran__bor_uint16)
// C=A+scalar GB (_bind2nd__bor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
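// For example (expansion sketch), computing one entry with the macros above:
//   uint16_t aij = GBX (Ax, pA, A_iso) ;
//   uint16_t bij = GBX (Bx, pB, B_iso) ;
//   Cx [p] = (aij) | (bij) ;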
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT16 || GxB_NO_BOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bor_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bor_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bor_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB (_bind1st_tran__bor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
solving_strategy.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_SOLVING_STRATEGY)
#define KRATOS_SOLVING_STRATEGY
/* System includes */
/* External includes */
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/** @brief Solving strategy base class
* @details This is the base class from which we will derive all the strategies (line-search, NR, etc...)
*/
template<class TSparseSpace,
class TDenseSpace,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class SolvingStrategy
{
public:
///@name Type Definitions
///@{
// typedef std::set<Dof::Pointer,ComparePDof> DofSetType;
typedef typename TSparseSpace::DataType TDataType;
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
typedef typename TSparseSpace::MatrixPointerType TSystemMatrixPointerType;
typedef typename TSparseSpace::VectorPointerType TSystemVectorPointerType;
typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
typedef typename TDenseSpace::VectorType LocalSystemVectorType;
typedef Scheme<TSparseSpace, TDenseSpace> TSchemeType;
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> TBuilderAndSolverType;
typedef typename ModelPart::DofType TDofType;
typedef typename ModelPart::DofsArrayType DofsArrayType;
// typedef Dof<TDataType> TDofType;
// typedef PointerVectorSet<TDofType, IdentityFunction<TDofType> > DofsArrayType;
// typedef PointerVectorSet<TDofType, IndexedObject> DofsArrayType;
typedef typename DofsArrayType::iterator DofIteratorType;
typedef typename DofsArrayType::const_iterator DofConstantIteratorType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION(SolvingStrategy);
///@}
///@name Life Cycle
///@{
/**
 * @brief Constructor with parameters
* @param rModelPart The model part of the problem
* @param ThisParameters The configuration parameters
*/
explicit SolvingStrategy(ModelPart& rModelPart, Parameters ThisParameters)
: mrModelPart(rModelPart)
{
const bool move_mesh_flag = ThisParameters.Has("move_mesh_flag") ? ThisParameters["move_mesh_flag"].GetBool() : false;
SetMoveMeshFlag(move_mesh_flag);
}
/**
* @brief Default constructor.
* @param rModelPart The model part to be computed
* @param MoveMeshFlag The flag to set if the mesh is moved or not
*/
explicit SolvingStrategy(
ModelPart& rModelPart,
bool MoveMeshFlag = false
) : mrModelPart(rModelPart)
{
SetMoveMeshFlag(MoveMeshFlag);
}
/** Destructor.
*/
virtual ~SolvingStrategy(){}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
 * @brief Operation to predict the solution: if it is not called, a trivial predictor is used in which the
 * values of the solution step of interest are assumed equal to the old values
*/
virtual void Predict()
{
}
/**
* @brief Initialization of member variables and prior operations
*/
virtual void Initialize()
{
}
/**
* @brief The problem of interest is solved.
* @details
* {
* This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), SolveSolutionStep() and FinalizeSolutionStep().
* All those functions can otherwise be called separately.
* }
*/
virtual double Solve()
{
Initialize();
InitializeSolutionStep();
Predict();
SolveSolutionStep();
FinalizeSolutionStep();
return 0.0;
}
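    /* Illustrative sketch (not part of the original interface docs): instead of
     * calling Solve(), a driver can run the phases per time step, e.g.
     *   strategy.Initialize();
     *   // for each step:
     *   strategy.InitializeSolutionStep();
     *   strategy.Predict();
     *   strategy.SolveSolutionStep();
     *   strategy.FinalizeSolutionStep();
     *   if (strategy.MoveMeshFlag()) strategy.MoveMesh();
     */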
/**
* @brief Clears the internal storage
*/
virtual void Clear()
{
}
/**
 * @brief This should be considered a "post-solution" convergence check, which is useful for coupled analysis
 * @details The convergence criterion used is the one used inside the "solve" step
*/
virtual bool IsConverged()
{
return true;
}
/**
 * @brief This operation should be called before printing the results when non-trivial results (e.g. stresses)
 * need to be calculated given the solution of the step
 * @details This operation should be called only when needed, before printing, as it can involve a non-negligible cost
*/
virtual void CalculateOutputData()
{
}
/**
* @brief Performs all the required operations that should be done (for each step) before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
virtual void InitializeSolutionStep()
{
}
/**
* @brief Performs all the required operations that should be done (for each step) after solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step.
*/
virtual void FinalizeSolutionStep()
{
}
/**
* @brief Solves the current step. This function returns true if a solution has been found, false otherwise.
*/
virtual bool SolveSolutionStep()
{
return true;
}
/**
* @brief This sets the level of echo for the solving strategy
* @param Level of echo for the solving strategy
* @details
* {
* 0 -> Mute... no echo at all
 * 1 -> Printing time and basic information
 * 2 -> Printing linear solver data
 * 3 -> Printing debug information: echo of the stiffness matrix, Dx, b...
* }
*/
virtual void SetEchoLevel(const int Level)
{
mEchoLevel = Level;
}
/**
* @brief This returns the level of echo for the solving strategy
* @details
* {
* 0 -> Mute... no echo at all
 * 1 -> Printing time and basic information
 * 2 -> Printing linear solver data
 * 3 -> Printing debug information: echo of the stiffness matrix, Dx, b...
* }
* @return Level of echo for the solving strategy
*/
virtual int GetEchoLevel()
{
return mEchoLevel;
}
/**
 * @brief This sets the build level
* @param Level The build level
* @details
* {
* 0 -> Build StiffnessMatrix just once
* 1 -> Build StiffnessMatrix at the beginning of each solution step
 * 2 -> Build StiffnessMatrix at each iteration
* }
*/
virtual void SetRebuildLevel(int Level)
{
mRebuildLevel = Level;
mStiffnessMatrixIsBuilt = false;
}
/**
* @brief This returns the build level
* @details
* {
* 0 -> Build StiffnessMatrix just once
* 1 -> Build StiffnessMatrix at the beginning of each solution step
 * 2 -> Build StiffnessMatrix at each iteration
* }
* @return The build level
*/
virtual int GetRebuildLevel()
{
return mRebuildLevel;
}
/**
* @brief This function sets the flag that says if the mesh is moved
* @param Flag True if the mesh is moved, false otherwise
*/
void SetMoveMeshFlag(bool Flag)
{
mMoveMeshFlag = Flag;
}
/**
* @brief This function returns the flag that says if the mesh is moved
* @return True if the mesh is moved, false otherwise
*/
bool MoveMeshFlag()
{
return mMoveMeshFlag;
}
/**
* @brief This function is designed to move the mesh
 * @note Be careful: it only considers displacements; derive this method to adapt it to your own strategies (ALE, FSI, etc...)
*/
virtual void MoveMesh()
{
KRATOS_TRY
KRATOS_ERROR_IF(GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false) << "It is impossible to move the mesh since the DISPLACEMENT var is not in the Model Part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
NodesArrayType& NodesArray = GetModelPart().Nodes();
const int numNodes = static_cast<int>(NodesArray.size());
#pragma omp parallel for
for(int i = 0; i < numNodes; ++i) {
auto it_node = NodesArray.begin() + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
}
KRATOS_INFO_IF("SolvingStrategy", this->GetEchoLevel() != 0 && GetModelPart().GetCommunicator().MyPID() == 0) <<" MESH MOVED "<<std::endl;
KRATOS_CATCH("")
}
/**
* @brief Operations to get the pointer to the model
* @return mrModelPart: The model part member variable
*/
inline ModelPart& GetModelPart()
{
return mrModelPart;
};
/**
* @brief Operations to get the residual norm
* @return The residual norm
*/
virtual double GetResidualNorm()
{
return 0.0;
}
/**
* @brief Function to perform expensive checks.
* @details It is designed to be called ONCE to verify that the input is correct.
*/
virtual int Check()
{
KRATOS_TRY
// Check if displacement var is needed
if (mMoveMeshFlag == true)
{
for (ModelPart::NodesContainerType::iterator itNode = GetModelPart().NodesBegin();
itNode != GetModelPart().NodesEnd(); itNode++)
{
if (itNode->SolutionStepsDataHas(DISPLACEMENT) == false)
{
KRATOS_ERROR << "ERROR:: Problem on node with Id " << itNode->Id() << "\nIt is impossible to move the mesh since the DISPLACEMENT var is not in the rModelPart. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
}
}
}
for (ModelPart::ElementsContainerType::iterator it_elem = GetModelPart().ElementsBegin();
it_elem != GetModelPart().ElementsEnd(); it_elem++)
{
it_elem->Check(GetModelPart().GetProcessInfo());
}
for (ModelPart::ConditionsContainerType::iterator it_cond = GetModelPart().ConditionsBegin();
it_cond != GetModelPart().ConditionsEnd(); it_cond++)
{
it_cond->Check(GetModelPart().GetProcessInfo());
}
return 0;
KRATOS_CATCH("")
}
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
return "SolvingStrategy";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << Info();
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
rOStream << Info();
}
///@}
protected:
///@name Protected static Member Variables
///@{
// Level of echo for the solving strategy
int mEchoLevel;
// Settings for the rebuilding of the stiffness matrix
int mRebuildLevel;
bool mStiffnessMatrixIsBuilt;
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
private:
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart& mrModelPart;
bool mMoveMeshFlag;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/** Copy constructor.
*/
SolvingStrategy(const SolvingStrategy& Other);
///@}
}; /* Class SolvingStrategy */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_SOLVING_STRATEGY defined */
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "gather_param.h"
typedef struct
{
    int* in_shape; // the shape (dims) of the input tensor
int axis;
int indices_num;
int dim_size;
int is_onnx;
} gather_param_t;
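/* Worked example (illustrative; the values are not from the original file):
 * with in_shape = [2,3,4], dim_size = 3, axis = 1, indices = [2,0] and
 * indices_num = 2: outer_size = 2, axis_size = 3, inner_size = 4, and the
 * output has shape [2,2,4] with out[o][i][:] = in[o][indices[i]][:]. */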
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
float* out_ptr = output;
float* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
// printf("inner_size size: %d %d \n", inner_size, param->in_shape[i]);
}
// #pragma omp parallel for num_threads(num_thread)
    if (param->is_onnx){
        // ONNX path: indices_num holds a single scalar index along the axis,
        // so one slice of inner_size elements is copied per outer iteration.
        for (int outer = 0; outer < outer_size; ++outer)
        {
            memcpy(out_ptr + (outer * param->indices_num) * inner_size,
                   in_ptr + (outer * axis_size + param->indices_num) * inner_size, inner_size * sizeof(float));
}
} else {
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size * sizeof(float));
}
}
}
return 0;
}
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
uint8_t* out_ptr = output;
uint8_t* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
}
// #pragma omp parallel for num_threads(num_thread)
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size);
}
}
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct gather_param* gather_param = ( struct gather_param* )ir_node->op.param_mem;
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
op_priv_info->axis = gather_param->axis;
op_priv_info->indices_num = gather_param->indices_num;
op_priv_info->is_onnx = gather_param->is_onnx;
op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num*sizeof(int));
/* prerun now */
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct ir_tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
int out_size = input_tensor->elem_num;
// auto in_dim = input_tensor->GetShape().GetDim();
void* input = input_tensor->data;
void* indices_data = indices_tensor->data;
op_priv_info->dim_size = input_tensor->dim_num;
for (int i = 0; i < op_priv_info->dim_size; i++)
{
op_priv_info->in_shape[i] = input_tensor->dims[i];
}
// printf("in shape: %d %d %d %d\n", op_priv_info->in_shape[0], op_priv_info->in_shape[1], op_priv_info->in_shape[3], op_priv_info->in_shape[3]);
// int indices_num = op_param.indices_num;
void* output = output_tensor->data;
int ret = -1;
if (input_tensor->data_type == TENGINE_DT_FP32)
ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread);
else if(input_tensor->data_type == TENGINE_DT_UINT8)
ret = ref_gather_uint8(input, indices_data, output, op_priv_info, exec_graph->num_thread);
return ret;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
gather_param_t* op_priv_info = ( gather_param_t* )sys_malloc(sizeof(gather_param_t));
if (op_priv_info == NULL)
{
set_tengine_errno(ENOMEM);
return -1;
}
memset(op_priv_info, 0, sizeof(gather_param_t));
exec_node->ops_priv = op_priv_info;
return 0;
}
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
gather_param_t* op_param = ( gather_param_t* )exec_node->ops_priv;
sys_free(op_param->in_shape);
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
sys_free(op_priv_info);
exec_node->ops_priv = NULL;
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
                                          .postrun = postrun, // frees the in_shape buffer allocated in prerun
.init_node = init_node,
.release_node = release_node,
.score = score};
static int reg_gather_ops(void* arg)
{
return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
static int unreg_gather_ops(void* arg)
{
return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
AUTO_REGISTER_OPS(reg_gather_ops);
AUTO_UNREGISTER_OPS(unreg_gather_ops);
|
omp_reduction.c | /******************************************************************************
* FILE: omp_reduction.c
* DESCRIPTION:
* OpenMP Example - Combined Parallel Loop Reduction - C/C++ Version
* This example demonstrates a sum reduction within a combined parallel loop
* construct. Notice that default data element scoping is assumed - there
* are no clauses specifying shared or private variables. OpenMP will
* automatically make loop index variables private within team threads, and
* global variables shared.
* AUTHOR: Blaise Barney 5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[])
{
int i, n;
float a[100], b[100], sum;
/* Some initializations */
n = 100;
for (i=0; i < n; i++)
a[i] = b[i] = i * 1.0;
sum = 0.0;
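/* With a[i] = b[i] = i, the reduction computes sum_{i=0}^{n-1} i^2;
   for n = 100 that is 99*100*199/6 = 328350. */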
#pragma omp parallel for reduction(+:sum)
for (i=0; i < n; i++)
sum = sum + (a[i] * b[i]);
printf(" Sum = %f\n",sum);
  return 0;
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop
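// gemm_nn_fast below keeps a TILE_M x TILE_N (4x16) tile of C in eight
// 256-bit accumulators (two 8-float registers per row) across the TILE_K
// inner loop, so each C tile is loaded and stored only once.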
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif
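// gemm_bin: A holds signs (nonzero entry -> +1, zero -> -1), so each row of B
// is either added to or subtracted from the corresponding row of C.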
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
float *random_matrix(int rows, int cols)
{
int i;
float* m = (float*)xcalloc(rows * cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------
static inline unsigned char xnor(unsigned char a, unsigned char b) {
//return a == b;
return !(a^b);
}
// INT-32
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
size_t src_i = index / 32;
int src_shift = index % 32;
unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
return val;
}
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
return ~(a^b);
}
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
return ~(a^b);
}
static inline uint32_t fill_bit_int32(char src) {
if (src == 0) return 0x00000000;
else return 0xFFFFFFFF;
}
static inline uint64_t fill_bit_int64(char src) {
if (src == 0) return 0x0000000000000000;
else return 0xFFFFFFFFFFFFFFFF;
}
void binary_int32_printf(uint32_t src) {
int i;
for (i = 0; i < 32; ++i) {
if (src & 1) printf("1");
else printf("0");
src = src >> 1;
}
printf("\n");
}
void binary_int64_printf(uint64_t src) {
int i;
for (i = 0; i < 64; ++i) {
if (src & 1) printf("1");
else printf("0");
src = src >> 1;
}
printf("\n");
}
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k*ldb + j);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
char b_bit = get_bit(B, j*ldb + k);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
const char a_bit = get_bit(A, i*lda + k);
uint64_t a_bit64 = fill_bit_int64(a_bit);
int k_ldb = k*ldb;
for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056]
if ((N - j > 64) && (k_ldb % 8 == 0)) {
uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
//printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc
printf("\n %d \n", __popcnt64(c_bit64)); // msvs
int h;
for (h = 0; h < 64; ++h)
if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1;
//binary_int64_printf(a_bit64);
//binary_int64_printf(b_bit64);
//binary_int64_printf(c_bit64);
}
else {
for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k_ldb + j);
if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1;
}
}
}
}
}
if (mean_arr) {
//int K_2 = K / 2;
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
//float mean_val2 = 2 * mean_val;
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
//C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2;
}
}
}
else {
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
C[i*ldc + j] = count_arr[i*ldc + j] - K / 2;
}
}
}
free(count_arr);
//getchar();
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
#ifdef WIN32
int tmp_count = __popcnt64(c_bit64);
#else
int tmp_count = __builtin_popcountll(c_bit64);
#endif
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
*/
//----------------------------
// is not used
/*
void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
{
unsigned int x, y;
for (y = 0; y < 32; ++y) {
for (x = 0; x < 32; ++x) {
if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y;
}
}
}
*/
#ifndef GPU
uint8_t reverse_8_bit(uint8_t a) {
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
uint32_t reverse_32_bit(uint32_t a)
{
// unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return (reverse_8_bit(a >> 24) << 0) |
(reverse_8_bit(a >> 16) << 8) |
(reverse_8_bit(a >> 8) << 16) |
(reverse_8_bit(a >> 0) << 24);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
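// transpose32_optimized: Hacker's Delight style 32x32 bit-matrix transpose -
// swap progressively smaller off-diagonal blocks (j = 16, 8, 4, 2, 1 with the
// matching masks), then reverse the rows and their bits so the result comes
// out along the reversed diagonal expected by the caller below.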
void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
unsigned A_tmp[32];
int i;
#pragma unroll
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
{
unsigned x, y;
for (y = 0; y < 8; ++y) {
for (x = 0; x < 8; ++x) {
if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y;
}
}
}
unsigned char reverse_byte_1(char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
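// Bit Twiddling Hacks: "Reverse the bits in a byte with 7 operations (no 64-bit)"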
unsigned char reverse_byte(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
static unsigned char lookup[16] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, };
unsigned char reverse_byte_3(unsigned char n) {
// Reverse the top and bottom nibble then swap them.
return (lookup[n & 0b1111] << 4) | lookup[n >> 4];
}
void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
unsigned x, y, t;
    // Load the array and pack it into x and y.
    x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
    y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
/*
// transpose by 8-bit
void transpose_bin(char *A, char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 8) {
int j;
for (j = 0; j < m; j += 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
//transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8);
transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
}
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
}
}
}
*/
#endif
// transpose by 32-bit
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32);
//printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 32) {
int j;
for (j = 0; j < m; j += 32) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
//transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
}
        for (; j < m; ++j) { // tail: unreachable as written, since the 32-wide loop above exits with j >= m
if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i);
}
}
}
static inline int popcnt_32(uint32_t val32) {
#ifdef WIN32 // Windows MSVS
int tmp_count = __popcnt(val32);
#else // Linux GCC
int tmp_count = __builtin_popcount(val32);
#endif
return tmp_count;
}
//----------------------------
#if (defined(__AVX__) && defined(__x86_64__)) || (defined(_WIN64) && !defined(__MINGW32__))
#if (defined(_WIN64) && !defined(__MINGW64__))
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#if defined(_MSC_VER) && _MSC_VER <= 1900
static inline __int64 _mm256_extract_epi64(__m256i a, const int index) {
return a.m256i_i64[index];
}
static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
return a.m256i_i32[index];
}
#endif
static inline float _castu32_f32(uint32_t a) {
return *((float *)&a);
}
static inline float _mm256_extract_float32(__m256 a, const int index) {
return a.m256_f32[index];
}
#else // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>
static inline float _castu32_f32(uint32_t a) {
return *((float *)&a);
}
static inline float _mm256_extract_float32(__m256 a, const int index) {
switch(index) {
case 0:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
case 1:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 1));
case 2:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 2));
case 3:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 3));
case 4:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 4));
case 5:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 5));
case 6:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 6));
case 7:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 7));
default:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
}
}
void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
uint32_t ebx = 0, edx = 0, ecx = 0;
// EBX is saved to EDI and later restored
__asm__("movl %%ebx, %%edi;"
"cpuid;"
"xchgl %%ebx, %%edi;"
: "=D"(ebx),
"+a"(eax), "+c"(ecx), "=d"(edx));
abcd[0] = eax;
abcd[1] = ebx;
abcd[2] = ecx;
abcd[3] = edx;
}
#endif
#ifdef _WIN32
// Windows
#define cpuid(info, x) __cpuidex(info, x, 0)
#else
// GCC Intrinsics
void cpuid(int info[4], int InfoType) {
__cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
}
#endif
// Misc.
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM; // Advanced Bit Manipulation
// SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;
// SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;
// SIMD: 512-bit
static int HW_AVX512F; // AVX512 Foundation
static int HW_AVX512CD; // AVX512 Conflict Detection
static int HW_AVX512PF; // AVX512 Prefetch
static int HW_AVX512ER; // AVX512 Exponential + Reciprocal
static int HW_AVX512VL; // AVX512 Vector Length Extensions
static int HW_AVX512BW; // AVX512 Byte + Word
static int HW_AVX512DQ; // AVX512 Doubleword + Quadword
static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions
// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
void check_cpu_features(void) {
int info[4];
cpuid(info, 0);
int nIds = info[0];
cpuid(info, 0x80000000);
unsigned nExIds = info[0];
// Detect Features
if (nIds >= 0x00000001) {
cpuid(info, 0x00000001);
HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0;
HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0;
HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0;
HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0;
HW_SSE41 = (info[2] & ((uint32_t)1 << 19)) != 0;
HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0;
HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0;
HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0;
HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0;
HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0;
}
if (nIds >= 0x00000007) {
cpuid(info, 0x00000007);
HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0;
HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0;
HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0;
HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0;
HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0;
HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0;
HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0;
HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0;
HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0;
HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0;
HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0;
HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0;
HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0;
HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0;
}
if (nExIds >= 0x80000001) {
cpuid(info, 0x80000001);
HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0;
HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0;
HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0;
HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0;
HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0;
}
}
int is_avx() {
static int result = -1;
if (result == -1) {
check_cpu_features();
result = HW_AVX;
if (result == 1) printf(" Used AVX \n");
else printf(" Not used AVX \n");
}
return result;
}
int is_fma_avx2() {
static int result = -1;
if (result == -1) {
check_cpu_features();
result = HW_FMA3 && HW_AVX2;
if (result == 1) printf(" Used FMA & AVX2 \n");
else printf(" Not used FMA & AVX2 \n");
}
return result;
}
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
if (is_avx() == 1) { // AVX
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
float A_PART = ALPHA*A[i*lda + k];
__m256 a256, b256, c256, result256; // AVX
a256 = _mm256_set1_ps(A_PART);
for (j = 0; j < N - 8; j += 8) {
b256 = _mm256_loadu_ps(&B[k*ldb + j]);
c256 = _mm256_loadu_ps(&C[i*ldc + j]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//result256 = _mm256_fmadd_ps(a256, b256, c256);
result256 = _mm256_mul_ps(a256, b256);
result256 = _mm256_add_ps(result256, c256);
_mm256_storeu_ps(&C[i*ldc + j], result256);
}
int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
for (j = prev_end; j < N; ++j)
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
else {
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
/* // SSE
__m128 a128, b128, c128, result128; // SSE
a128 = _mm_set1_ps(A_PART);
for (j = 0; j < N - 4; j += 4) {
b128 = _mm_loadu_ps(&B[k*ldb + j]);
c128 = _mm_loadu_ps(&C[i*ldc + j]);
//result128 = _mm_fmadd_ps(a128, b128, c128);
result128 = _mm_mul_ps(a128, b128);
result128 = _mm_add_ps(result128, c128);
_mm_storeu_ps(&C[i*ldc + j], result128);
}
int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
for (j = prev_end; j < N; ++j){
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
*/
}
}
}
}
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i;
#pragma omp parallel for
for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
{
int j, k;
int i_d, k_d;
for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
{
for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
{
// L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
// L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
// L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB
__m256 result256;
__m256 a256_0, b256_0; // AVX
__m256 a256_1, b256_1; // AVX
__m256 a256_2;// , b256_2; // AVX
__m256 a256_3;// , b256_3; // AVX
__m256 c256_0, c256_1, c256_2, c256_3;
__m256 c256_4, c256_5, c256_6, c256_7;
c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);
c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);
for (k_d = 0; k_d < (TILE_K); ++k_d)
{
a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);
b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
//c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
//c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
//c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);
//c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
//c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
//c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
//c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);
result256 = _mm256_mul_ps(a256_0, b256_0);
c256_0 = _mm256_add_ps(result256, c256_0);
result256 = _mm256_mul_ps(a256_1, b256_0);
c256_1 = _mm256_add_ps(result256, c256_1);
result256 = _mm256_mul_ps(a256_0, b256_1);
c256_2 = _mm256_add_ps(result256, c256_2);
result256 = _mm256_mul_ps(a256_1, b256_1);
c256_3 = _mm256_add_ps(result256, c256_3);
result256 = _mm256_mul_ps(a256_2, b256_0);
c256_4 = _mm256_add_ps(result256, c256_4);
result256 = _mm256_mul_ps(a256_3, b256_0);
c256_5 = _mm256_add_ps(result256, c256_5);
result256 = _mm256_mul_ps(a256_2, b256_1);
c256_6 = _mm256_add_ps(result256, c256_6);
result256 = _mm256_mul_ps(a256_3, b256_1);
c256_7 = _mm256_add_ps(result256, c256_7);
}
_mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
_mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
_mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
_mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);
_mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
_mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
_mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
_mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
}
for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
for (k_d = k; k_d < (k + TILE_K); ++k_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
}
}
}
}
for (k = (K / TILE_K)*TILE_K; k < K; ++k)
{
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
for (j = 0; j < N; ++j) {
C[i_d*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
int j, k;
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
__m256i a256 = _mm256_set1_epi32(A_PART);
for (j = 0; j < N - 8; j += 8)
{
__m256i b256 = *((__m256i*)&B[s*ldb + j]);
                __m256i xor256 = _mm256_xor_si256(a256, b256); // xor(a,b)
__m256i all_1 = _mm256_set1_epi8((char)255);
__m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
// waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
__m256 count = _mm256_setr_ps(
popcnt_32(_mm256_extract_epi32(xnor256, 0)),
popcnt_32(_mm256_extract_epi32(xnor256, 1)),
popcnt_32(_mm256_extract_epi32(xnor256, 2)),
popcnt_32(_mm256_extract_epi32(xnor256, 3)),
popcnt_32(_mm256_extract_epi32(xnor256, 4)),
popcnt_32(_mm256_extract_epi32(xnor256, 5)),
popcnt_32(_mm256_extract_epi32(xnor256, 6)),
popcnt_32(_mm256_extract_epi32(xnor256, 7)));
__m256 val2 = _mm256_set1_ps(2);
count = _mm256_mul_ps(count, val2); // count * 2
__m256 val32 = _mm256_set1_ps(32);
count = _mm256_sub_ps(count, val32); // count - 32
__m256 mean256 = _mm256_set1_ps(mean_val);
count = _mm256_mul_ps(count, mean256); // count * mean_val
__m256 c256 = *((__m256*)&C[i*ldc + j]);
count = _mm256_add_ps(count, c256); // c = c + count
*((__m256*)&C[i*ldc + j]) = count;
}
for (; j < N; ++j) // out_h*out_w;
{
PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be Signed int
C[i*ldc + j] += (2 * count - 32) * mean_val;
}
}
}
}
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
//int i, f, j;
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads( max_num_threads / 2);
}
#endif
//convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
for (i = 0; i < ksize*ksize*n*c; i+=8) {
*((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
}
//for (i = 0; i < w*h*c; i += 8) {
//(*(__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
//}
//__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
//all256_last_zero.m256i_i32[7] = 0;
__m256i all256_last_zero =
_mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);
__m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
//__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
__m256 all256_one = _mm256_set1_ps(1);
__m256i all256i_one = _mm256_set1_epi32(1);
///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
float cur_mean = fabs(mean[fil]);
__m256 mean256 = _mm256_set1_ps(cur_mean);
// channel index
//for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
            for (x = 0; x < w-8; x+=8) // note: up to 8 tail columns are left uncomputed by this experimental path
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
__m256 sum256 = _mm256_set1_ps(0);
for (chan = 0; chan < c; ++chan) {
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
//__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
if (input_y < 0 || input_y >= h) continue;
//__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
//if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
//if (input_y < 0 || input_y >= h) continue;
//sum += input[input_index] * weights[weights_index];
__m256 in = *((__m256*)&input[input_index]);
__m256 w = _mm256_set1_ps(weights[weights_index]);
//__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
__m256 xor256 = _mm256_xor_ps(w, in);
//printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
//printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);
//__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);
//sum256 = xor256;
sum256 = _mm256_add_ps(xor256, sum256);
//printf("\n --- \n");
//printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);
if (f_x < ksize-1) {
//in = _mm256_permutevar8x32_ps(in, idx256);
//in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
}
}
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output[output_index] += sum;
sum256 = _mm256_mul_ps(sum256, mean256);
//printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
// cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);
//__m256 out = *((__m256*)&output[output_index]);
//out = _mm256_add_ps(out, sum256);
//(*(__m256*)&output[output_index]) = out;
*((__m256*)&output[output_index]) = sum256;
//_mm256_storeu_ps(&C[i*ldc + j], result256);
}
}
}
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
static inline int popcnt128(__m128i n) {
const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#if defined(_MSC_VER)
return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#elif defined(__APPLE__) && defined(__clang__)
return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi));
#else
return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}
static inline int popcnt256(__m256i n) {
return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}
static inline __m256i count256(__m256i v) {
    // Mula's algorithm: per-byte popcount via two 4-bit table lookups,
    // then horizontal sums of 8-byte groups into four 64-bit lanes.
    __m256i lookup =
        _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
        2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
        1, 2, 2, 3, 2, 3, 3, 4); // popcount of each nibble, repeated for both 128-bit lanes
    __m256i low_mask = _mm256_set1_epi8(0x0f);
    __m256i lo = _mm256_and_si256(v, low_mask);                       // low nibbles
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask); // high nibbles
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);                // per-byte popcount
    return _mm256_sad_epu8(total, _mm256_setzero_si256());            // sum bytes into 64-bit lanes
}
static inline int popcnt256_custom(__m256i n) {
__m256i val = count256(n);
//return val.m256i_i64[0] +
//val.m256i_i64[1] +
//val.m256i_i64[2] +
//val.m256i_i64[3];
return _mm256_extract_epi64(val, 0)
+ _mm256_extract_epi64(val, 1)
+ _mm256_extract_epi64(val, 2)
+ _mm256_extract_epi64(val, 3);
}
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
__m256i c_bit256 = _mm256_set1_epi8((char)255);
    __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256);
    c_bit256 = _mm256_andnot_si256(xor256, c_bit256); // xnor = not(xor(a,b)); the NOT could be folded into the weights once instead of doing it here
*count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum); // 1st part - popcnt Mula's algorithm
}
// 2nd part - popcnt Mula's algorithm
static inline int get_count_mula(__m256i count_sum) {
return _mm256_extract_epi64(count_sum, 0)
+ _mm256_extract_epi64(count_sum, 1)
+ _mm256_extract_epi64(count_sum, 2)
+ _mm256_extract_epi64(count_sum, 3);
}
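// Hedged reference sketch (added for clarity; not one of the original
// kernels): the XNOR-GEMM below relies on the identity that, for K bits
// each encoding +1/-1, dot(a,b) = 2*popcount(xnor(a,b)) - K. This scalar
// version assumes K is a multiple of 64 and A/B packed as 64-bit words.
static inline float xnor_dot_scalar_ref(const uint64_t *a, const uint64_t *b,
    int K, float mean_val)
{
    int count = 0;
    int k;
    for (k = 0; k < K; k += 64) {
        uint64_t c = ~(a[k / 64] ^ b[k / 64]); // xnor: 1 where the bit-pairs agree
        for (; c; c &= c - 1) ++count;         // Kernighan bit-count
    }
    return (2 * count - K) * mean_val;         // same scaling as the AVX2 kernels below
}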
// ~5x faster than the float32 gemm()
// further optimizations: do mean-mult only for the last layer
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads(max_num_threads / 2);
}
#endif
//#pragma omp parallel for
//for (i = 0; i < M; ++i)
#pragma omp parallel for
for (i = 0; i < (M/2)*2; i += 2)
{ // l.n - filters [16 - 55 - 1024]
float mean_val_0 = mean_arr[i + 0];
float mean_val_1 = mean_arr[i + 1];
int j, k;
//__m256i all_1 = _mm256_set1_epi8(255);
//for (j = 0; j < N; ++j)
for (j = 0; j < (N/2)*2; j += 2)
{ // out_h*out_w - one channel output size [169 - 173056]
//int count = 0;
const int bit_step = 256;
__m256i count_sum_0 = _mm256_set1_epi8(0);
__m256i count_sum_1 = _mm256_set1_epi8(0);
__m256i count_sum_2 = _mm256_set1_epi8(0);
__m256i count_sum_3 = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
__m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
__m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);
xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);
//count += popcnt256(c_bit256);
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
int count_0 = get_count_mula(count_sum_0);
int count_1 = get_count_mula(count_sum_1);
int count_2 = get_count_mula(count_sum_2);
int count_3 = get_count_mula(count_sum_3);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count_0 = count_0 - f1; // subtract bits counted from the zero padding added only for alignment
count_1 = count_1 - f1;
count_2 = count_2 - f1;
count_3 = count_3 - f1;
C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
}
int i_d;
for (i_d = 0; i_d < 2; ++i_d)
{
float mean_val = mean_arr[i + i_d];
for (j = (N / 2) * 2; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // subtract bits counted from the zero padding added only for alignment
C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
}
}
}
for (i = (M / 2) * 2; i < M; i += 1)
{
float mean_val = mean_arr[i];
int j, k;
for (j = 0; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // subtract bits counted from the zero padding added only for alignment
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
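// Note on the kernel above: computing a 2x2 block of C per iteration (two rows
// of A against two rows of the transposed B) reuses each 256-bit load twice,
// roughly halving memory traffic compared to a 1x1 scalar-per-output scheme.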
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
int c;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) { // bound matches the other im2col variants: the 8-wide body must not run past the scalar epilogue
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0];
data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1];
data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2];
data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3];
data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4];
data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5];
data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6];
data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);// src256.m256_f32[7];
//_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = 0; h < height_col; ++h) {
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h * stride;
int im_col = w_offset + w * stride;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col-pad; ++h) {
for (w = pad; w < width_col-pad-8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col-1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col-1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_align(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.00);
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
//mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint16_t mask = _mm256_movemask_ps(result256); // bit = (val > 0) ? 1 : 0
uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
*dst_ptr |= (mask << (col_index % 8));
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
int i = 0;
if (a == LINEAR)
{}
else if (a == LEAKY)
{
if (is_fma_avx2()) {
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 all256_01 = _mm256_set1_ps(0.1F);
for (i = 0; i < n - 8; i += 8) {
//x[i] = (x[i]>0) ? x[i] : .1*x[i];
__m256 src256 = _mm256_loadu_ps(&x[i]);
__m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1
__m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats
__m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // x < 0 ? 0.1f*x : x (leaky ReLU via sign-bit blend)
_mm256_storeu_ps(&x[i], result256);
}
}
for (; i < n; ++i) {
x[i] = (x[i]>0) ? x[i] : .1*x[i];
}
}
else {
for (i = 0; i < n; ++i) {
x[i] = activate(x[i], a);
}
}
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
//__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.0);
for (i = 0; i < size; i+=8)
{
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint32_t mask = _mm256_movemask_ps(result256); // bit = (val > 0) ? 1 : 0
dst[i / 8] = mask;
}
}
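// Note: the 8-at-a-time loop above assumes size is a multiple of 8, or that
// callers pass src/dst buffers padded accordingly; otherwise the final
// _mm256_loadu_ps and byte store would touch memory past the end of the buffers.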
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
__m128 row1 = _mm_loadu_ps(&A[0 * lda]);
__m128 row2 = _mm_loadu_ps(&A[1 * lda]);
__m128 row3 = _mm_loadu_ps(&A[2 * lda]);
__m128 row4 = _mm_loadu_ps(&A[3 * lda]);
_MM_TRANSPOSE4_PS(row1, row2, row3, row4);
_mm_storeu_ps(&B[0 * ldb], row1);
_mm_storeu_ps(&B[1 * ldb], row2);
_mm_storeu_ps(&B[2 * ldb], row3);
_mm_storeu_ps(&B[3 * ldb], row4);
}
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; i += block_size) {
int j, i2, j2;
//int max_i2 = (i + block_size < n) ? (i + block_size) : n;
if (i + block_size < n) {
int max_i2 = i + block_size;
for (j = 0; j < m; j += block_size) {
//int max_j2 = (j + block_size < m) ? (j + block_size) : m;
if (j + block_size < m) {
int max_j2 = j + block_size;
for (i2 = i; i2 < max_i2; i2 += 4) {
for (j2 = j; j2 < max_j2; j2 += 4) {
transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
}
}
}
else {
for (i2 = i; i2 < max_i2; ++i2) {
for (j2 = j; j2 < m; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
else {
for (i2 = i; i2 < n; ++i2) {
for (j2 = 0; j2 < m; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
int b, k;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
//for (j = 0; j < out_w; ++j) {
j = 0;
if(stride == 1 && is_avx() == 1) {
for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
int out_index = j + out_w*(i + out_h*(k + c*b));
__m256 max256 = _mm256_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
max256 = _mm256_max_ps(src256, max256);
}
}
_mm256_storeu_ps(&dst[out_index], max256);
}
}
else if (size == 2 && stride == 2 && is_avx() == 1) {
for (j = 0; j < out_w - 4; j += 4) {
int out_index = j + out_w*(i + out_h*(k + c*b));
//float max = -FLT_MAX;
//int max_i = -1;
__m128 max128 = _mm_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
//for (m = 0; m < size; ++m)
m = 0;
{
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
__m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
__m256 max256 = _mm256_max_ps(src256, src256_2);
__m128 src128_0 = _mm256_extractf128_ps(max256, 0);
__m128 src128_1 = _mm256_extractf128_ps(max256, 1);
__m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));
max128 = _mm_max_ps(src128, max128);
}
}
_mm_storeu_ps(&dst[out_index], max128);
}
}
for (; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
}
}
}
#else // AVX
int is_avx() {
return 0;
}
int is_fma_avx2() {
return 0;
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
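// The i-k-j loop order above keeps the innermost loop streaming through B and
// C with unit stride while A[i][k] is held in a register (PUT_IN_REGISTER),
// which helps the compiler vectorize the j loop.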
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
//PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
for (j = 0; j < N; ++j) // out_h*out_w;
{
//c[i*n + j] += A_PART*b[s*n + j];
PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
//printf(" xnor_result = %d, ", xnor_result);
int32_t count = popcnt_32(xnor_result); // must be Signed int
C[i*ldc + j] += (2 * count - 32) * mean_val;
//c[i*n + j] += count*mean;
}
}
}
}
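// Why (2 * count - 32) * mean_val: with {-1,+1} values encoded as bits {0,1},
// a product is +1 exactly when the two bits match, i.e. when xnor(a,b) is 1.
// Per 32-bit word, matches = popcnt(xnor) and mismatches = 32 - matches, so
// the dot product is 2*popcnt(xnor) - 32. A minimal scalar sketch of the
// identity (not from the original source):
static inline float xnor_dot32_example(uint32_t a, uint32_t b, float mean_val)
{
    int32_t count = popcnt_32(~(a ^ b)); // number of matching bit positions
    return (2 * count - 32) * mean_val;  // scaled dot product of 32 {-1,+1} values
}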
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
//int i, f, j;
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32 // Windows
#ifdef _WIN64 // Windows 64-bit
int tmp_count = __popcnt64(val64);
#else // Windows 32-bit
int tmp_count = __popcnt(val64);
tmp_count += __popcnt(val64 >> 32);
#endif
#else // Linux
#if defined(__x86_64__) || defined(__aarch64__) // Linux 64-bit
int tmp_count = __builtin_popcountll(val64);
#else // Linux 32-bit
int tmp_count = __builtin_popcount(val64);
tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
return tmp_count;
}
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = popcnt_64(c_bit64);
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
return; // the optimized path below is intentionally disabled in this build
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 1) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
int i;
if (a == LINEAR)
{
}
else if (a == LEAKY)
{
for (i = 0; i < n; ++i) {
x[i] = (x[i]>0) ? x[i] : .1*x[i];
}
}
else {
for (i = 0; i < n; ++i) {
x[i] = activate(x[i], a);
}
}
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
char* byte_arr = (char*)xcalloc(size + 8, sizeof(char)); // pad so the 8-at-a-time loop below cannot read past the end when size % 8 != 0
for (i = 0; i < size; ++i) {
if (src[i] > 0) byte_arr[i] = 1;
}
//for (i = 0; i < size; ++i) {
// dst[i / 8] |= byte_arr[i] << (i % 8);
//}
for (i = 0; i < size; i += 8) {
char dst_tmp = 0;
dst_tmp |= byte_arr[i + 0] << 0;
dst_tmp |= byte_arr[i + 1] << 1;
dst_tmp |= byte_arr[i + 2] << 2;
dst_tmp |= byte_arr[i + 3] << 3;
dst_tmp |= byte_arr[i + 4] << 4;
dst_tmp |= byte_arr[i + 5] << 5;
dst_tmp |= byte_arr[i + 6] << 6;
dst_tmp |= byte_arr[i + 7] << 7;
dst[i / 8] = dst_tmp;
}
free(byte_arr);
}
static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
int i;
//#pragma omp parallel for
for (i = 0; i<block_size; i++) {
int j;
for (j = 0; j<block_size; j++) {
B[j*ldb + i] = A[i*lda + j];
}
}
}
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; i += block_size) {
int j, i2, j2;
for (j = 0; j < m; j += block_size) {
int max_i2 = i + block_size < n ? i + block_size : n;
int max_j2 = j + block_size < m ? j + block_size : m;
for (i2 = i; i2 < max_i2; ++i2) {
for (j2 = j; j2 < max_j2; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
int b, k;
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
for (j = 0; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
}
}
}
#endif // AVX
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
const int items_per_channel = w * h;
int chan, i;
for (chan = 0; chan < c; chan += 32)
{
for (i = 0; i < items_per_channel; ++i)
{
int c_pack;
for (c_pack = 0; c_pack < 32; ++c_pack) {
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
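// Example of the repacking above: with c = 64, source element
// input[(32 + c_pack)*items_per_channel + i] lands at
// re_packed_input[32*items_per_channel + i*32 + c_pack], so the 32 channels of
// a pack become the innermost dimension and one spatial position maps onto one
// future uint32_t of packed sign bits.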
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
//l.bit_align - aligned (n) by 32
//new_ldb - aligned (k) by 256
int i;
//#pragma omp parallel for
for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c;
{
int j;
for (j = 0; j < src_w; j += 1) // out_h*out_w;
{
dst[j*dst_align / 32 + i] = src[i*src_align + j]; // parameters are already uint32_t*, no casts needed
}
}
}
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) // out_h*out_w;
{
float val = 0;
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s]; // A and B are already uint32_t*, no casts needed
PUT_IN_REGISTER uint32_t B_PART = B[j*ldb + s];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be Signed int
val += (2 * count - 32) * mean_val;
}
C[i*ldc + j] += val;
}
}
}
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
int fil;
// filter index
#pragma omp parallel for
for (fil = 0; fil < n; ++fil) {
float mean_val = mean_arr[fil];
int chan, y, x, f_y, f_x; // c_pack
// channel index
for (chan = 0; chan < c / 32; ++chan)
//for (chan = 0; chan < l.c; chan += 32)
//for (c_pack = 0; c_pack < 32; ++c_pack)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
// filter - y
for (f_y = 0; f_y < size; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < size; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
// normal
//float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
//float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];
// packed
//float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
//float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
//sum += input * weight;
//float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
//float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
//uint32_t bit1 = input > 0;
//uint32_t bit2 = weight > 0;
//uint32_t count = (~(bit1 ^ bit2)) & 1;
//float result = (2 * (float)count - 1) * mean_val;
//printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
//sum += result;
uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
//uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];
uint32_t xnor_result = ~(input ^ weight);
int32_t count = popcnt_32(xnor_result); // must be signed int
sum += (2 * count - 32) * mean_val;
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
PUT_IN_REGISTER float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
PUT_IN_REGISTER float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
if (BETA != 1){
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
}
is_avx(); // initialize static variable
if (is_fma_avx2() && !TA && !TB) {
gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}
else {
int t;
#pragma omp parallel for
for (t = 0; t < M; ++t) {
if (!TA && !TB)
gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else if (TA && !TB)
gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
else if (!TA && TB)
gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else
gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
}
}
}
#ifdef GPU
#include <math.h>
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
CHECK_CUDA(stream_status);
cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
CHECK_CUDA(status);
}
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
float *C_gpu = cuda_make_array(C, ldc*M);
gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);
cuda_pull_array(C_gpu, C, ldc*M);
cuda_free(A_gpu);
cuda_free(B_gpu);
cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void time_ongpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaDeviceSynchronize();
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,192,729,1600);
time_ongpu(0,0,384,196,1728);
time_ongpu(0,0,256,196,3456);
time_ongpu(0,0,256,196,2304);
time_ongpu(0,0,128,4096,12544);
time_ongpu(0,0,128,4096,4096);
*/
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,576,12544);
time_ongpu(0,0,256,2304,784);
time_ongpu(1,1,2304,256,784);
time_ongpu(0,0,512,4608,196);
time_ongpu(1,1,4608,512,196);
return 0;
}
#endif
void init_cpu() {
is_avx();
is_fma_avx2();
}
|
libperf_thread.c | /**
* Copyright (C) NVIDIA 2021. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <ucs/sys/module.h>
#include <ucs/sys/string.h>
#include <tools/perf/lib/libperf_int.h>
#include <string.h>
#include <unistd.h>
#if _OPENMP
# include <omp.h>
static ucs_status_t ucx_perf_thread_run_test(void* arg)
{
ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg; /* a single thread context */
ucx_perf_result_t* result = &tctx->result;
ucx_perf_context_t* perf = &tctx->perf;
ucx_perf_params_t* params = &perf->params;
ucs_status_t status;
/* new threads need explicit device association */
status = perf->send_allocator->init(perf);
if (status != UCS_OK) {
goto out;
}
if (perf->send_allocator != perf->recv_allocator) {
status = perf->recv_allocator->init(perf);
if (status != UCS_OK) {
goto out;
}
}
status = ucx_perf_do_warmup(perf, params);
if (UCS_OK != status) {
goto out;
}
/* Run test */
#pragma omp barrier
status = ucx_perf_funcs[params->api].run(perf);
ucx_perf_funcs[params->api].barrier(perf);
if (UCS_OK != status) {
goto out;
}
ucx_perf_calc_result(perf, result);
out:
return status;
}
static void ucx_perf_thread_report_aggregated_results(ucx_perf_context_t *perf)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
unsigned i, thread_count = perf->params.thread_count;
double lat_sum_total_average = 0.0;
ucx_perf_result_t agg_result;
agg_result.iters = tctx[0].result.iters;
agg_result.bytes = tctx[0].result.bytes;
agg_result.elapsed_time = tctx[0].result.elapsed_time;
agg_result.bandwidth.total_average = 0.0;
agg_result.bandwidth.percentile = 0.0; /* undefined: percentile is only meaningful for latency */
agg_result.latency.total_average = 0.0;
agg_result.msgrate.total_average = 0.0;
agg_result.msgrate.percentile = 0.0; /* undefined: percentile is only meaningful for latency */
/* when running with multiple threads, the moment average value is
* undefined since we don't capture the values of the last iteration */
agg_result.msgrate.moment_average = 0.0;
agg_result.bandwidth.moment_average = 0.0;
agg_result.latency.moment_average = 0.0;
agg_result.latency.percentile = 0.0;
/* with multiple threads, aggregate the per-thread results so that the final
 * report reflects all of them: bandwidth and message rate are summed across
 * the threads, while latency is averaged over them. */
for (i = 0; i < thread_count; i++) {
agg_result.bandwidth.total_average += tctx[i].result.bandwidth.total_average;
agg_result.msgrate.total_average += tctx[i].result.msgrate.total_average;
lat_sum_total_average += tctx[i].result.latency.total_average;
}
agg_result.latency.total_average = lat_sum_total_average / thread_count;
rte_call(perf, report, &agg_result, perf->params.report_arg, "", 1, 1);
}
ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result)
{
ucx_perf_thread_context_t* tctx = perf->ucp.tctx; /* all the thread contexts on perf */
int ti, thread_count = perf->params.thread_count;
ucs_status_t* statuses;
ucs_status_t status;
omp_set_num_threads(thread_count);
statuses = calloc(thread_count, sizeof(ucs_status_t));
if (statuses == NULL) {
status = UCS_ERR_NO_MEMORY;
goto out;
}
#pragma omp parallel private(ti)
{
ti = omp_get_thread_num();
tctx[ti].status = ucx_perf_thread_run_test((void*)&tctx[ti]);
}
status = UCS_OK;
for (ti = 0; ti < thread_count; ti++) {
if (UCS_OK != tctx[ti].status) {
ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
ucs_status_string(tctx[ti].status));
status = tctx[ti].status;
}
}
ucx_perf_thread_report_aggregated_results(perf);
free(statuses);
out:
return status;
}
#else
ucs_status_t ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result)
{
ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
matmul_float.c | /*
* Square matrix multiplication
* A[N][N] * B[N][N] = C[N][N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#define N 512
//#define N 16
// read the wall-clock timer in seconds
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
void init(float **A) {
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
A[i][j] = (float)rand()/(float)(RAND_MAX/10.0);
}
}
}
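// matmul_simd() and matmul_serial() below expect the second operand already
// transposed (main() builds BT), so the k loop reads both matrices with unit
// stride -- the access pattern that lets "omp simd" vectorize the reduction.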
void matmul_simd(float **A, float **B, float **C) {
int i,j,k;
float temp;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
temp = 0;
#pragma omp simd reduction(+:temp)
for (k = 0; k < N; k++) {
temp += A[i][k] * B[j][k];
}
C[i][j] = temp;
}
}
}
// Debug functions
void print_matrix(float **matrix) {
for (int i = 0; i<8; i++) {
printf("[");
for (int j = 0; j<8; j++) {
printf("%.2f ", matrix[i][j]);
}
puts("]");
}
puts("");
}
void matmul_serial(float **A, float **B, float **C) {
int i,j,k;
float temp;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
temp = 0;
for (k = 0; k < N; k++) {
temp += A[i][k] * B[j][k];
}
C[i][j] = temp;
}
}
}
// returns the sum of signed element-wise differences; opposite-sign errors can
// cancel, so this is only a quick smoke test, not a strict correctness check
float check(float **A, float **B){
float difference = 0;
for(int i = 0;i<N; i++){
for (int j = 0; j<N; j++)
{ difference += A[i][j]- B[i][j];}
}
return difference;
}
// Main
int main(int argc, char *argv[]) {
//Set everything up
float **A = malloc(sizeof(float*)*N);
float **B = malloc(sizeof(float*)*N);
float **C_simd = malloc(sizeof(float*)*N);
float **C_serial = malloc(sizeof(float*)*N);
float **BT = malloc(sizeof(float*)*N);
for (int i = 0; i<N; i++) {
A[i] = malloc(sizeof(float)*N);
B[i] = malloc(sizeof(float)*N);
C_simd[i] = malloc(sizeof(float)*N);
C_serial[i] = malloc(sizeof(float)*N);
BT[i] = malloc(sizeof(float)*N);
}
srand(time(NULL));
init(A);
init(B);
for(int line = 0; line<N; line++){
for(int col = 0; col<N; col++){
BT[line][col] = B[col][line];
}
}
int i;
int num_runs = 10;
double elapsed = read_timer();
for (i=0; i<num_runs; i++)
matmul_simd(A, BT, C_simd);
elapsed = (read_timer() - elapsed);
double elapsed_serial = read_timer();
for (i=0; i<num_runs; i++)
matmul_serial(A, BT, C_serial);
elapsed_serial = (read_timer() - elapsed_serial);
print_matrix(A);
print_matrix(BT);
puts("=\n");
print_matrix(C_simd);
puts("---------------------------------");
print_matrix(C_serial);
double gflops_omp = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed));
double gflops_serial = ((((2.0 * N) * N) * N * num_runs) / (1.0e9 * elapsed_serial));
printf("======================================================================================================\n");
printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
printf("------------------------------------------------------------------------------------------------------\n");
printf("Performance:\t\tRuntime (s)\t GFLOPS\n");
printf("------------------------------------------------------------------------------------------------------\n");
printf("matmul_omp:\t\t%4f\t%4f\n", elapsed, gflops_omp);
printf("matmul_serial:\t\t%4f\t%4f\n", elapsed_serial, gflops_serial);
printf("Correctness check: %f\n", check(C_simd,C_serial));
return 0;
}
|
convolutiondepthwise_5x5_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw5x5s1_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1(bias + g * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = _bias0;
vfloat32m1_t _sum1 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k03 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k04 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k00, _r00, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k01, _r01, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k02, _r02, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k03, _r03, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k04, _r04, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
vfloat32m1_t _r13 = vle32_v_f32m1(r1 + packn * 3, vl);
vfloat32m1_t _r14 = vle32_v_f32m1(r1 + packn * 4, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k00, _r10, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k01, _r11, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k02, _r12, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k03, _r13, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k04, _r14, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k13 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k14 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k10, _r10, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k11, _r11, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k12, _r12, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k13, _r13, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k14, _r14, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
vfloat32m1_t _r23 = vle32_v_f32m1(r2 + packn * 3, vl);
vfloat32m1_t _r24 = vle32_v_f32m1(r2 + packn * 4, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k10, _r20, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k11, _r21, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k12, _r22, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k13, _r23, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k14, _r24, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k23 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k24 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k20, _r20, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k21, _r21, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k22, _r22, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k23, _r23, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k24, _r24, vl);
vfloat32m1_t _r30 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _r31 = vle32_v_f32m1(r3 + packn, vl);
vfloat32m1_t _r32 = vle32_v_f32m1(r3 + packn * 2, vl);
vfloat32m1_t _r33 = vle32_v_f32m1(r3 + packn * 3, vl);
vfloat32m1_t _r34 = vle32_v_f32m1(r3 + packn * 4, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k20, _r30, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k21, _r31, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k22, _r32, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k23, _r33, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k24, _r34, vl);
vfloat32m1_t _k30 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k31 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k32 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k33 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k34 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k30, _r30, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k31, _r31, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k32, _r32, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k33, _r33, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k34, _r34, vl);
vfloat32m1_t _r40 = vle32_v_f32m1(r4, vl);
vfloat32m1_t _r41 = vle32_v_f32m1(r4 + packn, vl);
vfloat32m1_t _r42 = vle32_v_f32m1(r4 + packn * 2, vl);
vfloat32m1_t _r43 = vle32_v_f32m1(r4 + packn * 3, vl);
vfloat32m1_t _r44 = vle32_v_f32m1(r4 + packn * 4, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k30, _r40, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k31, _r41, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k32, _r42, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k33, _r43, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k34, _r44, vl);
vfloat32m1_t _k40 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k41 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k42 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k43 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k44 = vle32_v_f32m1(k0 + packn * 4, vl);
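                // rewind k0 to kernel row 0 (4 rows * 5 taps * packn floats
                // were consumed) before moving to the next output pixel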
k0 -= packn * 20;
_sum0 = vfmacc_vv_f32m1(_sum0, _k40, _r40, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k41, _r41, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k42, _r42, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k43, _r43, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k44, _r44, vl);
vfloat32m1_t _r50 = vle32_v_f32m1(r5, vl);
vfloat32m1_t _r51 = vle32_v_f32m1(r5 + packn, vl);
vfloat32m1_t _r52 = vle32_v_f32m1(r5 + packn * 2, vl);
vfloat32m1_t _r53 = vle32_v_f32m1(r5 + packn * 3, vl);
vfloat32m1_t _r54 = vle32_v_f32m1(r5 + packn * 4, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k40, _r50, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k41, _r51, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k42, _r52, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k43, _r53, vl);
_sum1 = vfmacc_vv_f32m1(_sum1, _k44, _r54, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
vse32_v_f32m1(outptr1, _sum1, vl);
outptr0 += packn;
outptr1 += packn;
r0 += packn;
r1 += packn;
r2 += packn;
r3 += packn;
r4 += packn;
r5 += packn;
}
r0 += 4 * packn + w * packn;
r1 += 4 * packn + w * packn;
r2 += 4 * packn + w * packn;
r3 += 4 * packn + w * packn;
r4 += 4 * packn + w * packn;
r5 += 4 * packn + w * packn;
outptr0 += outw * packn;
outptr1 += outw * packn;
}
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k03 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k04 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k00, _r00, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k01, _r01, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k02, _r02, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k03, _r03, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k04, _r04, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
vfloat32m1_t _r13 = vle32_v_f32m1(r1 + packn * 3, vl);
vfloat32m1_t _r14 = vle32_v_f32m1(r1 + packn * 4, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k13 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k14 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k10, _r10, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k11, _r11, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k12, _r12, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k13, _r13, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k14, _r14, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
vfloat32m1_t _r23 = vle32_v_f32m1(r2 + packn * 3, vl);
vfloat32m1_t _r24 = vle32_v_f32m1(r2 + packn * 4, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k23 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k24 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k20, _r20, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k21, _r21, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k22, _r22, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k23, _r23, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k24, _r24, vl);
vfloat32m1_t _r30 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _r31 = vle32_v_f32m1(r3 + packn, vl);
vfloat32m1_t _r32 = vle32_v_f32m1(r3 + packn * 2, vl);
vfloat32m1_t _r33 = vle32_v_f32m1(r3 + packn * 3, vl);
vfloat32m1_t _r34 = vle32_v_f32m1(r3 + packn * 4, vl);
vfloat32m1_t _k30 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k31 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k32 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k33 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k34 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k30, _r30, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k31, _r31, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k32, _r32, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k33, _r33, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k34, _r34, vl);
vfloat32m1_t _r40 = vle32_v_f32m1(r4, vl);
vfloat32m1_t _r41 = vle32_v_f32m1(r4 + packn, vl);
vfloat32m1_t _r42 = vle32_v_f32m1(r4 + packn * 2, vl);
vfloat32m1_t _r43 = vle32_v_f32m1(r4 + packn * 3, vl);
vfloat32m1_t _r44 = vle32_v_f32m1(r4 + packn * 4, vl);
vfloat32m1_t _k40 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k41 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k42 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k43 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k44 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 -= packn * 20;
_sum0 = vfmacc_vv_f32m1(_sum0, _k40, _r40, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k41, _r41, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k42, _r42, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k43, _r43, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k44, _r44, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
outptr0 += packn;
r0 += packn;
r1 += packn;
r2 += packn;
r3 += packn;
r4 += packn;
}
r0 += 4 * packn;
r1 += 4 * packn;
r2 += 4 * packn;
r3 += 4 * packn;
r4 += 4 * packn;
}
}
}
static void convdw5x5s2_packn_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
const int packn = csrr_vlenb() / 4;
const word_type vl = vsetvl_e32m1(packn);
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
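    // stride-2 row step: skip the unread tail of the current input row plus
    // one full row, since the kernel window moves down by 2 between rows.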
const int tailstep = (w - 2 * outw + w) * packn;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
vfloat32m1_t _bias0 = bias ? vle32_v_f32m1(bias + g * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
vfloat32m1_t _sum0 = _bias0;
vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
vfloat32m1_t _k00 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k01 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k02 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k03 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k04 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k00, _r00, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k01, _r01, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k02, _r02, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k03, _r03, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k04, _r04, vl);
vfloat32m1_t _r10 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r11 = vle32_v_f32m1(r1 + packn, vl);
vfloat32m1_t _r12 = vle32_v_f32m1(r1 + packn * 2, vl);
vfloat32m1_t _r13 = vle32_v_f32m1(r1 + packn * 3, vl);
vfloat32m1_t _r14 = vle32_v_f32m1(r1 + packn * 4, vl);
vfloat32m1_t _k10 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k11 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k12 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k13 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k14 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k10, _r10, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k11, _r11, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k12, _r12, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k13, _r13, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k14, _r14, vl);
vfloat32m1_t _r20 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r21 = vle32_v_f32m1(r2 + packn, vl);
vfloat32m1_t _r22 = vle32_v_f32m1(r2 + packn * 2, vl);
vfloat32m1_t _r23 = vle32_v_f32m1(r2 + packn * 3, vl);
vfloat32m1_t _r24 = vle32_v_f32m1(r2 + packn * 4, vl);
vfloat32m1_t _k20 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k21 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k22 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k23 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k24 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k20, _r20, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k21, _r21, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k22, _r22, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k23, _r23, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k24, _r24, vl);
vfloat32m1_t _r30 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _r31 = vle32_v_f32m1(r3 + packn, vl);
vfloat32m1_t _r32 = vle32_v_f32m1(r3 + packn * 2, vl);
vfloat32m1_t _r33 = vle32_v_f32m1(r3 + packn * 3, vl);
vfloat32m1_t _r34 = vle32_v_f32m1(r3 + packn * 4, vl);
vfloat32m1_t _k30 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k31 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k32 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k33 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k34 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 += packn * 5;
_sum0 = vfmacc_vv_f32m1(_sum0, _k30, _r30, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k31, _r31, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k32, _r32, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k33, _r33, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k34, _r34, vl);
vfloat32m1_t _r40 = vle32_v_f32m1(r4, vl);
vfloat32m1_t _r41 = vle32_v_f32m1(r4 + packn, vl);
vfloat32m1_t _r42 = vle32_v_f32m1(r4 + packn * 2, vl);
vfloat32m1_t _r43 = vle32_v_f32m1(r4 + packn * 3, vl);
vfloat32m1_t _r44 = vle32_v_f32m1(r4 + packn * 4, vl);
vfloat32m1_t _k40 = vle32_v_f32m1(k0, vl);
vfloat32m1_t _k41 = vle32_v_f32m1(k0 + packn, vl);
vfloat32m1_t _k42 = vle32_v_f32m1(k0 + packn * 2, vl);
vfloat32m1_t _k43 = vle32_v_f32m1(k0 + packn * 3, vl);
vfloat32m1_t _k44 = vle32_v_f32m1(k0 + packn * 4, vl);
k0 -= packn * 20;
_sum0 = vfmacc_vv_f32m1(_sum0, _k40, _r40, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k41, _r41, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k42, _r42, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k43, _r43, vl);
_sum0 = vfmacc_vv_f32m1(_sum0, _k44, _r44, vl);
vse32_v_f32m1(outptr0, _sum0, vl);
outptr0 += packn;
r0 += packn * 2;
r1 += packn * 2;
r2 += packn * 2;
r3 += packn * 2;
r4 += packn * 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
DRB061-matrixvector1-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Matrix-vector multiplication: outer-level loop parallelization
*/
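/*
Race-free: each outer-loop iteration writes a distinct v_out[i] (and
distinct rows of a in init), and the inner dot product accumulates into
a per-iteration local sum under a declared reduction.
*/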
#include <stdio.h>
double a[100][100], v[100], v_out[100];
int init()
{
int i, j, k;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name init#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<100; i ++ )
{
#pragma cetus lastprivate(j)
#pragma loop name init#0#0
#pragma cetus parallel
#pragma omp parallel for lastprivate(j)
for (j=0; j<100; j ++ )
{
a[i][j]=((i*j)+0.01);
}
v_out[i]=((i*j)+0.01);
v[i]=((i*j)+0.01);
}
_ret_val_0=0;
return _ret_val_0;
}
int mv()
{
int i, j;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name mv#0
#pragma cetus parallel
#pragma omp parallel for private(i, j)
for (i=0; i<100; i ++ )
{
double sum = 0.0;
#pragma cetus private(j)
#pragma loop name mv#0#0
#pragma cetus reduction(+: sum)
#pragma cetus parallel
#pragma omp parallel for private(j) reduction(+: sum)
for (j=0; j<100; j ++ )
{
sum+=(a[i][j]*v[j]);
}
v_out[i]=sum;
}
_ret_val_0=0;
return _ret_val_0;
}
int print()
{
int i, j, k;
int _ret_val_0;
#pragma cetus private(i, j)
#pragma loop name print#0
for (i=0; i<100; i ++ )
{
#pragma cetus private(j)
#pragma loop name print#0#0
for (j=0; j<100; j ++ )
{
printf("%lf\n", a[i][j]);
}
printf("%lf\n", v_out[i]);
printf("%lf\n", v[i]);
}
_ret_val_0=0;
return _ret_val_0;
}
int main()
{
int _ret_val_0;
init();
mv();
print();
_ret_val_0=0;
return _ret_val_0;
}
|
GB_binop__bset_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bset_uint16
// A.*B function (eWiseMult): GB_AemultB__bset_uint16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bset_uint16
// C+=b function (dense accum): GB_Cdense_accumb__bset_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_uint16
// C=scalar+B GB_bind1st__bset_uint16
// C=scalar+B' GB_bind1st_tran__bset_uint16
// C=A+scalar GB_bind2nd__bset_uint16
// C=A'+scalar GB_bind2nd_tran__bset_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITSET (x, y, uint16_t, 16) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT16 || GxB_NO_BSET_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bset_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bset_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bset_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bset_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bset_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bset_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t bij = Bx [p] ;
Cx [p] = GB_BITSET (x, bij, uint16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bset_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
Cx [p] = GB_BITSET (aij, y, uint16_t, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, uint16_t, 16) ; \
}
GrB_Info GB_bind1st_tran__bset_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (aij, y, uint16_t, 16) ; \
}
GrB_Info GB_bind2nd_tran__bset_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
root-threads-affinity.c | // RUN: %libomp-compile && env LIBOMP_NUM_HIDDEN_HELPER_THREADS=0 OMP_PROC_BIND=close OMP_PLACES=cores KMP_AFFINITY=verbose %libomp-run 8 1 4
// REQUIRES: linux
//
// This test pthread_creates 8 root threads before any OpenMP
// runtime entry is ever called. We have all the root threads
// register with the runtime by calling omp_set_num_threads(),
// but this does not initialize their affinity. The fourth root thread
// then calls a parallel region and we make sure its affinity
// is correct. We also make sure all the other root threads are
// free-floating since they have not called into a parallel region.
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include "libomp_test_affinity.h"
volatile int entry_flag = 0;
volatile int flag = 0;
volatile int num_roots_arrived = 0;
int num_roots;
int spawner = 0;
pthread_mutex_t lock;
int register_workers = 0; // boolean
affinity_mask_t *full_mask;
int __kmpc_global_thread_num(void*);
int get_os_thread_id() {
return (int)syscall(SYS_gettid);
}
int place_and_affinity_match() {
int i, max_cpu;
char buf[512];
affinity_mask_t *mask = affinity_mask_alloc();
int place = omp_get_place_num();
int num_procs = omp_get_place_num_procs(place);
int *ids = (int*)malloc(sizeof(int) * num_procs);
omp_get_place_proc_ids(place, ids);
get_thread_affinity(mask);
affinity_mask_snprintf(buf, sizeof(buf), mask);
printf("Primary Thread Place: %d\n", place);
printf("Primary Thread mask: %s\n", buf);
for (i = 0; i < num_procs; ++i) {
int cpu = ids[i];
if (!affinity_mask_isset(mask, cpu))
return 0;
}
max_cpu = AFFINITY_MAX_CPUS;
for (i = 0; i < max_cpu; ++i) {
int cpu = i;
if (affinity_mask_isset(mask, cpu)) {
int j, found = 0;
for (j = 0; j < num_procs; ++j) {
if (ids[j] == cpu) {
found = 1;
break;
}
}
      if (!found) {
        affinity_mask_free(mask);
        free(ids);
        return 0;
      }
}
}
affinity_mask_free(mask);
free(ids);
return 1;
}
void* thread_func(void *arg) {
int place, nplaces;
int root_id = *((int*)arg);
int pid = getpid();
int tid = get_os_thread_id();
// Order how the root threads are assigned a gtid in the runtime
// i.e., root_id = gtid
while (1) {
int v = entry_flag;
if (v == root_id)
break;
}
// If main root thread
if (root_id == spawner) {
printf("Initial application thread (pid=%d, tid=%d, spawner=%d) reached thread_func (will call OpenMP)\n", pid, tid, spawner);
omp_set_num_threads(4);
#pragma omp atomic
entry_flag++;
// Wait for the workers to signal their arrival before #pragma omp parallel
while (num_roots_arrived < num_roots - 1) {}
// This will trigger the output for KMP_AFFINITY in this case
#pragma omp parallel
{
int gtid = __kmpc_global_thread_num(NULL);
#pragma omp single
{
printf("Exactly %d threads in the #pragma omp parallel\n",
omp_get_num_threads());
}
#pragma omp critical
{
printf("OpenMP thread %d: gtid=%d\n", omp_get_thread_num(), gtid);
}
}
flag = 1;
if (!place_and_affinity_match()) {
fprintf(stderr, "error: place and affinity mask do not match for primary thread\n");
exit (EXIT_FAILURE);
}
} else { // If worker root thread
// Worker root threads, register with OpenMP through omp_set_num_threads()
// if designated to, signal their arrival and then wait for the main root
// thread to signal them to exit.
printf("New root pthread (pid=%d, tid=%d) reached thread_func\n", pid, tid);
if (register_workers)
omp_set_num_threads(4);
#pragma omp atomic
entry_flag++;
pthread_mutex_lock(&lock);
num_roots_arrived++;
pthread_mutex_unlock(&lock);
while (flag == 0) {}
    // Finally, check that this root thread's affinity mask still equals the
    // initial (full) mask, since it never entered a parallel region
affinity_mask_t *mask = affinity_mask_alloc();
get_thread_affinity(mask);
if (!affinity_mask_equal(mask, full_mask)) {
      char buf[1024];
      affinity_mask_snprintf(buf, sizeof(buf), mask);
      printf("root thread %d mask: %s\n", root_id, buf);
      affinity_mask_snprintf(buf, sizeof(buf), full_mask);
      printf("initial affinity mask: %s\n", buf);
fprintf(stderr, "error: root thread %d affinity mask not equal"
" to initial full mask\n", root_id);
affinity_mask_free(mask);
exit(EXIT_FAILURE);
}
affinity_mask_free(mask);
}
return NULL;
}
int main(int argc, char** argv) {
int i;
if (argc != 3 && argc != 4) {
fprintf(stderr, "usage: %s <num_roots> <register_workers_bool> [<spawn_root_number>]\n", argv[0]);
exit(EXIT_FAILURE);
}
// Initialize pthread mutex
pthread_mutex_init(&lock, NULL);
// Get initial full mask
full_mask = affinity_mask_alloc();
get_thread_affinity(full_mask);
// Get the number of root pthreads to create and allocate resources for them
num_roots = atoi(argv[1]);
pthread_t *roots = (pthread_t*)malloc(sizeof(pthread_t) * num_roots);
int *root_ids = (int*)malloc(sizeof(int) * num_roots);
// Get the flag indicating whether to have root pthreads call omp_set_num_threads() or not
register_workers = atoi(argv[2]);
if (argc == 4)
spawner = atoi(argv[3]);
// Spawn worker root threads
for (i = 1; i < num_roots; ++i) {
*(root_ids + i) = i;
pthread_create(roots + i, NULL, thread_func, root_ids + i);
}
// Have main root thread (root 0) go into thread_func
*root_ids = 0;
thread_func(root_ids);
// Cleanup all resources
for (i = 1; i < num_roots; ++i) {
void *status;
pthread_join(roots[i], &status);
}
free(roots);
free(root_ids);
pthread_mutex_destroy(&lock);
return EXIT_SUCCESS;
}
|
1064.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
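  /* ATAX: y = A^T (A x). The first loop clears y; the second computes
     tmp[i] = (A x)[i] row by row and scatters A[i][:] * tmp[i] into y. */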
#pragma scop
{
#pragma omp target teams distribute thread_limit(256)
for (i = 0; i < _PB_NY; i++)
{
y[i] = 0;
}
#pragma omp target teams distribute thread_limit(256)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
for (j = 0; j < _PB_NY; j++)
y[j] = y[j] + A[i][j] * tmp[i];
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
hp16.c | /* 16-bit hash prospector
*
* Unlike the 32-bit / 64-bit prospector, this implementation is fully
* portable and will run on just about any system. It's also capable of
* generating and evaluating 128kB s-boxes.
*
* Be mindful of C integer promotion rules when doing 16-bit operations.
* For instance, on 32-bit implementations unsigned 16-bit operands will
* be promoted to signed 32-bit integers, leading to incorrect results in
* certain cases. The C programs printed by this program are careful to
* promote 16-bit operations to "unsigned int" where needed.
*
* Since 16-bit hashes are likely to be needed on machines that do not
* have efficient hardware multiplication or whose ISAs lack rotation
* instructions, these operations may be optionally omitted during
* exploration (-m, -r).
*
* This is free and unencumbered software released into the public domain.
*/
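/* For illustration (a hypothetical snippet, not part of the prospector):
 * a 16-bit unsigned operand promotes to signed int on typical 32-bit
 * targets, so x << 15 can shift into the sign bit (undefined behavior)
 * and wide products can overflow as signed arithmetic. Casting to
 * unsigned first, then masking back to 16 bits, keeps it well defined:
 *
 *     unsigned short x = 0x8001;
 *     x = (unsigned short)(((unsigned)x << 3) & 0xffffu);
 */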
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define OPS_MAX 32
enum hf_type {
HF16_XOR, // x ^= imm
HF16_MUL, // x *= imm (odd)
HF16_ADD, // x += imm
HF16_ROT, // x = (x << imm) | (x >> (16 - imm))
HF16_NOT, // x = ~x
HF16_XORL, // x ^= x << imm
HF16_XORR, // x ^= x >> imm
HF16_ADDL, // x += x << imm
HF16_SUBL, // x -= x << imm
HF16_SBOX, // x = sbox[x]
};
struct hf_op {
enum hf_type type;
unsigned imm;
};
static unsigned short sbox[1L<<16];
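/* 64-bit xorshift-multiply mixer, used below only to derive PRNG seeds
 * from time(0) and clock(). The 64-bit masks keep results correct on
 * implementations where unsigned long long is wider than 64 bits. */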
static unsigned long long
hash64(unsigned long long x)
{
x ^= x >> 32;
x *= 0x25b751109e05be63;
x &= 0xffffffffffffffff;
x ^= x >> 32;
x *= 0x2330e1453ed4b9b9;
x &= 0xffffffffffffffff;
x ^= x >> 32;
return x;
}
static unsigned long
u32(unsigned long long *s)
{
unsigned long r = *s >> 32;
*s = *s*0x7c3c3267d015ceb5 + 1;
r &= 0xffffffff;
r ^= r >> 16;
r *= 0x60857ba9;
return r & 0xffffffff;
}
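/* Uniform integer in [0, r): multiply-shift with rejection (Lemire's
 * method). Draws whose low 32 bits of x*r fall below 2^32 mod r are
 * redrawn, leaving the high 32 bits of the product exactly uniform. */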
static unsigned long
randint(unsigned long r, unsigned long long s[1])
{
unsigned long long x = u32(s);
unsigned long long m = x * r;
unsigned long y = m & 0xffffffff;
if (y < r) {
unsigned long t = (unsigned long)((0x100000000ULL - r) % r); /* 2^32 mod r, correct even when long is 64-bit */
while (y < t) {
x = u32(s);
m = x * r;
y = m & 0xffffffff;
}
}
return m >> 32;
}
static struct hf_op
hf_gen(enum hf_type type, unsigned long long s[1])
{
struct hf_op op;
op.type = type;
switch (op.type) {
case HF16_NOT:
case HF16_SBOX: op.imm = 0; break;
case HF16_XOR:
case HF16_ADD: op.imm = u32(s)>>16; break;
case HF16_MUL: op.imm = u32(s)>>16 | 1; break;
case HF16_ROT:
case HF16_XORL:
case HF16_XORR:
case HF16_ADDL:
case HF16_SUBL: op.imm = 1 + u32(s)%15; break;
}
return op;
}
/* May these operations be adjacent? */
static int
hf_type_valid(enum hf_type a, enum hf_type b)
{
switch (a) {
case HF16_NOT:
case HF16_XOR:
case HF16_MUL:
case HF16_ADD:
case HF16_ROT:
case HF16_SBOX: return a != b;
case HF16_XORL:
case HF16_XORR:
case HF16_ADDL:
case HF16_SUBL: return 1;
}
return 0;
}
static void
hf_genfunc(struct hf_op *ops, int n, unsigned long long s[1])
{
for (int i = 0; i < n; i++) {
do {
enum hf_type type = u32(s) % HF16_SBOX; // (exclude sbox)
ops[i] = hf_gen(type, s);
} while (i > 0 && !hf_type_valid(ops[i-1].type, ops[i].type));
}
}
/* Indicate operation diffusion direction (+1 left, 0 none, -1 right). */
static int
opdir(struct hf_op op)
{
switch (op.type) {
case HF16_NOT:
case HF16_XOR:
case HF16_ADD:
case HF16_SBOX: return 0;
case HF16_MUL:
case HF16_XORL:
case HF16_ADDL:
case HF16_SUBL: return +1;
case HF16_XORR: return -1;
case HF16_ROT: if (op.imm < 8) return +1;
if (op.imm > 8) return -1;
return 0;
}
abort();
}
/* Prefer to alternate bit diffusion directions. */
static void
hf_gensmart(struct hf_op *ops, int n, unsigned long long s[1])
{
int dir = 0;
for (int i = 0; i < n; i++) {
int newdir;
do {
ops[i] = hf_gen(u32(s)%HF16_SBOX, s);
newdir = opdir(ops[i]);
} while (dir && newdir == dir);
dir = newdir ? newdir : dir;
}
}
static int
popcount(int v)
{
// both GCC and Clang recognize this function as popcnt
int c = 0;
for (; v; c++) v &= v - 1;
return c;
}
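/* Generate an xorshift-multiply chain: x ^= x >> A; x *= C1; x ^= x >> B;
 * ... ending on an xorshift. Shift amounts come from a popcount of 14
 * random bits, which biases them toward the middle of the 1..15 range. */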
static void
hf_genxormul(struct hf_op *ops, int n, unsigned long long s[1])
{
ops[0].type = HF16_XORR;
ops[0].imm = 1 + popcount(u32(s) >> 18);
for (int i = 0; i < n; i++) {
ops[2*i+1].type = HF16_MUL;
ops[2*i+1].imm = u32(s)>>16 | 1;
ops[2*i+2].type = HF16_XORR;
ops[2*i+2].imm = 1 + popcount(u32(s) >> 18);
}
}
/* An Add-Xor-Shift (AXS) hash alternates between diffusion leftward and
* rightward where one direction is always xorshift and the other direction is
* always add/sub-shift.
*
* x ^= x >> A; x += x << B;
* x ^= x >> C; x -= x << D;
* x ^= x >> E; x += x << F;
*
* This function generates all permutations of this construction in order.
*/
#define AXS_COUNT 182250000
#define AXS_SIZE 6
static void
hf_genaxs(struct hf_op *ops, long i)
{
int shifts[] = {
1 + (i / 1) % 15,
1 + (i / 15) % 15,
1 + (i / 225) % 15,
1 + (i / 3375) % 15,
1 + (i / 50625) % 15,
1 + (i / 759375) % 15,
};
int types[] = {
(i / 11390625) % 2,
(i / 22781250) % 2,
(i / 45562500) % 2,
};
int swap = (i / 91125000) % 2;
for (int j = 0; j < 6; j += 2) {
ops[j+ swap].type = types[j/2] ? HF16_ADDL : HF16_SUBL;
ops[j+ swap].imm = shifts[j+0];
ops[j+!swap].type = HF16_XORR;
ops[j+!swap].imm = shifts[j+1];
}
}
static unsigned
hf_apply(const struct hf_op *ops, int n, unsigned x)
{
for (int i = 0; i < n; i++) {
switch (ops[i].type) {
case HF16_XOR: x ^= ops[i].imm; break;
case HF16_MUL: x *= ops[i].imm; break;
case HF16_ADD: x += ops[i].imm; break;
case HF16_ROT: x = x<<ops[i].imm | x>>(16 - ops[i].imm); break;
case HF16_NOT: x = ~x; break;
case HF16_XORL: x ^= x << ops[i].imm; break;
case HF16_XORR: x ^= x >> ops[i].imm; break;
case HF16_ADDL: x += x << ops[i].imm; break;
case HF16_SUBL: x -= x << ops[i].imm; break;
case HF16_SBOX: x = sbox[x]; break;
}
x &= 0xffff;
}
return x;
}
static void
hf_print(const struct hf_op *ops, int n, FILE *f)
{
fprintf(f, "uint16_t hash(uint16_t x)\n");
fprintf(f, "{\n");
for (int i = 0; i < n; i++) {
fputs(" ", f);
switch (ops[i].type) {
case HF16_XOR:
fprintf(f, "x ^= 0x%04x;\n", ops[i].imm);
break;
case HF16_MUL:
fprintf(f, "x *= 0x%04xU;\n", ops[i].imm);
break;
case HF16_ADD:
fprintf(f, "x += 0x%04xU;\n", ops[i].imm);
break;
case HF16_ROT:
fprintf(f, "x = (unsigned)x<<%d | x >>%d;\n",
ops[i].imm, 16-ops[i].imm);
break;
case HF16_NOT:
fprintf(f, "x = ~x;\n");
break;
case HF16_XORL:
fprintf(f, "x ^= (unsigned)x << %d;\n", ops[i].imm);
break;
case HF16_XORR:
fprintf(f, "x ^= x >> %d;\n", ops[i].imm);
break;
case HF16_ADDL:
fprintf(f, "x += (unsigned)x << %d;\n", ops[i].imm);
break;
case HF16_SUBL:
fprintf(f, "x -= (unsigned)x << %d;\n", ops[i].imm);
break;
case HF16_SBOX:
fprintf(f, "x = sbox[x];\n");
break;
}
}
fprintf(f, " return x;\n");
fprintf(f, "}\n");
}
static void
sbox_init(void)
{
for (long i = 0; i < 1L<<16; i++) {
sbox[i] = i;
}
}
static void
sbox_shuffle(unsigned long long s[1])
{
for (long i = 0xffff; i > 0; i--) {
long j = randint(i + 1, s);
unsigned swap = sbox[i];
sbox[i] = sbox[j];
sbox[j] = swap;
}
}
static void
sbox_print(FILE *f)
{
for (long i = 0; i < 1L<<16; i++) {
fprintf(f, "%04x%c", sbox[i], i % 16 == 15 ? '\n' : ' ');
}
}
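/* Avalanche bias: for each input bit j, flip it across all 2^16 inputs
 * and count how often each output bit k flips. An ideal hash flips each
 * output bit with probability 1/2 (2^15 counts), so the score is the
 * normalized RMS deviation from 2^15; lower is better. */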
static double
score(const struct hf_op *ops, int n)
{
long bins[32][32] = {{0}};
for (long x = 0; x < 1L<<16; x++) {
unsigned h0 = hf_apply(ops, n, x);
for (int j = 0; j < 16; j++) {
unsigned bit = 1U << j;
unsigned h1 = hf_apply(ops, n, x^bit);
unsigned set = h0 ^ h1;
for (int k = 0; k < 16; k++)
bins[j][k] += (set >> k) & 1;
}
}
double mean = 0.0;
for (int j = 0; j < 16; j++) {
for (int k = 0; k < 16; k++) {
double diff = (bins[j][k] - (1<<15)) / (double)(1<<15);
mean += (diff * diff) / (16 * 16);
}
}
return sqrt(mean);
}
static int
match(const struct hf_op *ops, int n, int types)
{
for (int i = 0; i < n; i++) {
if (1<<ops[i].type & types) {
return 1;
}
}
return 0;
}
static int xoptind = 1;
static int xopterr = 1;
static int xoptopt;
static char *xoptarg;
static int
xgetopt(int argc, char **argv, const char *optstring)
{
static int optpos = 1;
const char *arg;
(void)argc;
/* Reset? */
if (xoptind == 0) {
xoptind = 1;
optpos = 1;
}
arg = argv[xoptind];
if (arg && strcmp(arg, "--") == 0) {
xoptind++;
return -1;
} else if (!arg || arg[0] != '-' || !isalnum(arg[1])) {
return -1;
} else {
const char *opt = strchr(optstring, arg[optpos]);
xoptopt = arg[optpos];
if (!opt) {
if (xopterr && *optstring != ':')
fprintf(stderr, "%s: illegal option: %c\n", argv[0], xoptopt);
return '?';
} else if (opt[1] == ':') {
if (arg[optpos + 1]) {
xoptarg = (char *)arg + optpos + 1;
xoptind++;
optpos = 1;
return xoptopt;
} else if (argv[xoptind + 1]) {
xoptarg = (char *)argv[xoptind + 1];
xoptind += 2;
optpos = 1;
return xoptopt;
} else {
if (xopterr && *optstring != ':')
fprintf(stderr,
"%s: option requires an argument: %c\n",
argv[0], xoptopt);
return *optstring == ':' ? ':' : '?';
}
} else {
if (!arg[++optpos]) {
xoptind++;
optpos = 1;
}
return xoptopt;
}
}
}
static void
usage(FILE *f)
{
fprintf(f, "hp16: [-HISX] [-hmr] [-n INT]\n");
fprintf(f, " -A mode: evaluate AXS hashes\n");
fprintf(f, " -H mode: random hash prospector (default)\n");
fprintf(f, " -I mode: smarter (?) hash prospector\n");
fprintf(f, " -S mode: s-box prospector \n");
fprintf(f, " -X mode: xorshift-multiply prospector\n");
fprintf(f, " -h print this message and exit\n");
fprintf(f, " -m exclude multiplication\n");
fprintf(f, " -n INT number of operations\n");
fprintf(f, " -r exclude rotation\n");
}
int
main(int argc, char **argv)
{
char *ptr;
int n = 0;
int exclude = 0;
enum {
MODE_HASH, MODE_SMART, MODE_XORMUL, MODE_SBOX, MODE_AXS
} mode = MODE_HASH;
unsigned long tmp;
struct hf_op ops[1+2*OPS_MAX] = {{HF16_SBOX, 0}};
int option;
while ((option = xgetopt(argc, argv, "AHhImn:rSX")) != -1) {
switch (option) {
case 'A':
mode = MODE_AXS;
break;
case 'H':
mode = MODE_HASH;
break;
case 'h':
usage(stdout);
return 0;
case 'I':
mode = MODE_SMART;
break;
case 'm':
exclude |= 1<<HF16_MUL;
break;
case 'n':
tmp = strtoul(xoptarg, &ptr, 10);
if (!tmp || *ptr || tmp > OPS_MAX) {
fprintf(stderr, "fatal: invalid n, %s\n", xoptarg);
usage(stderr);
return 1;
}
n = tmp;
break;
case 'r':
exclude |= 1<<HF16_ROT;
break;
case 'S':
mode = MODE_SBOX;
break;
case 'X':
mode = MODE_XORMUL;
break;
case '?':
usage(stderr);
return 1;
}
}
switch (mode) {
case MODE_HASH:
case MODE_SMART: n = n ? n : 7; break;
case MODE_XORMUL: n = n ? 1 + 2*n : 5; break;
case MODE_SBOX: sbox_init(); n = 1; break;
case MODE_AXS: break;
}
double best = 1;
unsigned long long s[1] = {hash64(time(0))};
if (mode == MODE_AXS) {
#pragma omp parallel for
for (long i = 0; i < AXS_COUNT; i++) {
struct hf_op hf[AXS_SIZE];
hf_genaxs(hf, i);
double r = score(hf, AXS_SIZE);
#pragma omp critical
if (r < best) {
best = r;
printf("// bias = %.17g\n", r);
hf_print(hf, AXS_SIZE, stdout);
fputc('\n', stdout);
fflush(stdout);
}
}
return 0;
}
for (;;) {
*s += hash64(time(0));
switch (mode) {
case MODE_HASH:
do {
hf_genfunc(ops, n, s);
} while (match(ops, n, exclude));
break;
case MODE_SMART:
do {
hf_gensmart(ops, n, s);
} while (match(ops, n, exclude));
break;
case MODE_XORMUL:
hf_genxormul(ops, (n-1)/2, s);
break;
case MODE_SBOX:
sbox_shuffle(s);
break;
case MODE_AXS:
abort();
}
*s -= hash64(clock());
double r = score(ops, n);
if (r < best) {
switch (mode) {
case MODE_HASH:
case MODE_SMART:
case MODE_XORMUL:
printf("// bias = %.17g\n", r);
hf_print(ops, n, stdout);
fputc('\n', stdout);
break;
case MODE_SBOX:
fprintf(stdout, "// bias = %.17g\n", r);
sbox_print(stdout);
fputc('\n', stdout);
fprintf(stderr, "// bias = %.17g\n", r);
fflush(stderr);
break;
case MODE_AXS:
abort();
}
fflush(stdout);
best = r;
}
}
}
|
gi_regular_grid_trilinear_function.h | /*
*
* Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu>
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*/
#ifndef REGULAR_GRID_TRILINEAR_FUNCTION
#define REGULAR_GRID_TRILINEAR_FUNCTION
#include <algorithm>
#include <cmath>
#include "base/gi_basic_types.h"
#include "base/gi_vectors.h"
#include "base/gi_regular_grid_3d.h"
namespace GInt {
// store image and gradient tied to a 3d grid.
class RegularGridTrilinearFunction {
protected:
RegularGrid3D * m_grid;
Vec3d* m_grad;
FLOATTYPE* m_image;
FLOATTYPE m_min_value;
FLOATTYPE m_max_value;
bool m_i_made_gradient;
bool m_i_made_image;
void fill_extents() {
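        // Manual min/max reduction: each thread scans its share of the image
        // with firstprivate local extrema, then merges them in a critical
        // section (a pattern that predates OpenMP 3.1's min/max reductions).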
FLOATTYPE t_max_val = m_max_value = m_image[0];
FLOATTYPE t_min_val = m_min_value = m_image[0];
INDEX_TYPE num_elements = m_grid->NumElements();
INDEX_TYPE ii;
#pragma omp parallel shared(num_elements) private(ii) firstprivate(t_max_val,t_min_val)
{
#pragma omp for nowait
for (ii = 0; ii<num_elements; ++ii)
{
if (m_image[ii] > t_max_val)
{
t_max_val = m_image[ii];
}
if (m_image[ii] < t_min_val)
{
t_min_val = m_image[ii];
}
}
#pragma omp critical
{
if (t_max_val > m_max_value) m_max_value = t_max_val;
if (t_min_val < m_min_value) m_min_value = t_min_val;
}
}
}
public:
FLOATTYPE GetMinValue() const { return m_min_value; }
FLOATTYPE GetMaxValue() const { return m_max_value; }
RegularGridTrilinearFunction(RegularGrid3D* grid, FLOATTYPE *image = 0) : m_grid(grid) {
m_i_made_image = false;
m_i_made_gradient = false;
m_image = NULL;
m_grad = NULL;
// use the function if it is passed, otherwise simply allocate memory
if(image != 0) { m_image = image; }
//m_grad = new Vec3d[m_grid->NumElements()];
}
~RegularGridTrilinearFunction() {
if (m_i_made_gradient) delete[] m_grad;
if (m_i_made_image) delete[] m_image;
}
// return pointer to underlying mesh and function
const RegularGrid3D* GetGrid() const { return m_grid; }
FLOATTYPE* GetImage() const { return m_image; }
// sample the image at integral location
FLOATTYPE SampleImage(const Vec3l& p) const {
return m_image[m_grid->Index3d(p)];
}
// sample the image at integral location
FLOATTYPE SampleImage(const INDEX_TYPE id) const {
return m_image[id];
}
// sample the gradient at integral location
const Vec3d& SampleGrad(const Vec3l& p) const {
return m_grad[m_grid->Index3d(p)];
}
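    // Trilinear interpolation collapses the 8 surrounding samples one axis
    // at a time: four lerps along x, two along y, then one along z, with
    // weights taken from the fractional offset of s within its cell.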
FLOATTYPE TriLinInterpValue(const Vec3d& s) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurrounding(s, n);
Vec3d b = n[0];
//s.print_vf();
//b.print_vf();
Vec3d factors = s - b;
FLOATTYPE x0 = (1 - factors[0]) * SampleImage(n[0]) + SampleImage(n[1]) * factors[0];
FLOATTYPE x1 = (1 - factors[0]) * SampleImage(n[2]) + SampleImage(n[3]) * factors[0];
FLOATTYPE x2 = (1 - factors[0]) * SampleImage(n[4]) + SampleImage(n[5]) * factors[0];
FLOATTYPE x3 = (1 - factors[0]) * SampleImage(n[6]) + SampleImage(n[7]) * factors[0];
FLOATTYPE y0 = (1 - factors[1]) *x0 + x1 * factors[1];
FLOATTYPE y1 = (1 - factors[1]) *x2 + x3 * factors[1];
return (1 - factors[2]) *y0 + y1 * factors[2];
}
// return trilinearly interpolated gradient
Vec3d TriLinInterpGrad(const Vec3d& s) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurrounding(s, n);
Vec3d b = n[0];
//s.print_vf();
//b.print_vf();
Vec3d factors = s - b;
Vec3d x0 = Vec3d::Lerp(SampleGrad(n[0]), SampleGrad(n[1]), factors[0]);
Vec3d x1 = Vec3d::Lerp(SampleGrad(n[2]), SampleGrad(n[3]), factors[0]);
Vec3d x2 = Vec3d::Lerp(SampleGrad(n[4]), SampleGrad(n[5]), factors[0]);
Vec3d x3 = Vec3d::Lerp(SampleGrad(n[6]), SampleGrad(n[7]), factors[0]);
Vec3d y0 = Vec3d::Lerp(x0, x1, factors[1]);
Vec3d y1 = Vec3d::Lerp(x2, x3, factors[1]);
return Vec3d::Lerp(y0, y1, factors[2]);
}
void SetGradExplicit(INDEX_TYPE id, Vec3d vec) {
this->m_grad[id] = vec;
}
// fill in vals with the 8 values of the gradient around the sample point
void GetGradSurrounding(const Vec3d& s, Vec3d* vals) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurrounding(s, n);
for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]);
}
void GetGradSurrounding(const Vec3l& s, Vec3d* vals) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurrounding(s, n);
for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]);
}
// use with extreme care - no boundary checks; only use on truly interior points
void GetGradSurroundingNoBoundaryCheck(const Vec3d& s, Vec3d* vals) const {
Vec3l n[8]; // for 8 vertices around s - some may be repeated based on boundary cond.
m_grid->GatherSurroundingNoBoundaryCheck(s, n);
for (int i = 0; i < 8; i++) vals[i] = SampleGrad(n[i]);
}
FLOATTYPE InterpolatedValue(const Vec3d& s) const {
return TriLinInterpValue(s);
}
Vec3d InterpolatedGrad(const Vec3d& s) const {
return TriLinInterpGrad(s);
}
// allows reuse of sampled gradients; assumes vals holds the 8 gradient vectors around s
Vec3d TriLinInterpGrad(const Vec3d& s, const Vec3l& int_base, Vec3d* vals) const {
//if (!(s.IntFloor() == int_base)) {
// printf("s="); s.PrintFloat(); printf("d="); int_base.PrintFloat();
//}
//
//Vec3d d = int_base.IntFloor();
Vec3d factors = s - int_base;
Vec3d x0 = Vec3d::Lerp(vals[0], vals[1], factors[0]);
Vec3d x1 = Vec3d::Lerp(vals[2], vals[3], factors[0]);
Vec3d x2 = Vec3d::Lerp(vals[4], vals[5], factors[0]);
Vec3d x3 = Vec3d::Lerp(vals[6], vals[7], factors[0]);
Vec3d y0 = Vec3d::Lerp(x0, x1, factors[1]);
Vec3d y1 = Vec3d::Lerp(x2, x3, factors[1]);
return Vec3d::Lerp(y0, y1, factors[2]);
}
void LoadImageFromFloatFile(const char* fname) {
size_t image_size = m_grid->NumElements();
// fill in image
m_image = new FLOATTYPE[image_size]; m_i_made_image = true;
FILE* fin = fopen(fname, "rb");
for (INDEX_TYPE i = 0; i < image_size; i++) {
float tval = 0;
fread(&tval, sizeof(float), 1, fin);
m_image[i] = tval;
}
fclose(fin);
fill_extents();
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
void LoadImageFromFile(const char* fname) {
size_t image_size = m_grid->NumElements();
// fill in image
m_image = new FLOATTYPE[image_size]; m_i_made_image = true;
FILE* fin = fopen(fname, "rb");
fread(m_image, sizeof(FLOATTYPE), image_size, fin);
fclose(fin);
fill_extents();
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
void ShallowCopyImage(FLOATTYPE *image) {
m_image = image;
INDEX_TYPE image_size = m_grid->NumElements();
fill_extents();
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
void DeepCopyImage(const FLOATTYPE *image) {
m_image = new FLOATTYPE[m_grid->NumElements()]; m_i_made_image = true;
INDEX_TYPE image_size = m_grid->NumElements();
memcpy(m_image, image, image_size*sizeof(FLOATTYPE));
fill_extents();
printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value);
}
static const FLOATTYPE kRKCoefficients[5][9];
Vec3d GradientFromImage(const Vec3l& p, int rklevel) {
Vec3l negs[9]; // don't support more than 4th order - cmon. would be ridiculous
double res_x = 0.0;
int rklevel_x = m_grid->Gather1DNeighborhood(p, 0, rklevel, negs);
int nume_x = rklevel_x * 2 + 1; // number of entries to average
for (int i = 0; i < nume_x; i++) {
res_x += kRKCoefficients[rklevel_x][i] * SampleImage(negs[i]);
}
double res_y = 0.0;
int rklevel_y = m_grid->Gather1DNeighborhood(p, 1, rklevel, negs);
int nume_y = rklevel_y * 2 + 1; // number of entries to average
for (int i = 0; i < nume_y; i++) {
res_y += kRKCoefficients[rklevel_y][i] * SampleImage(negs[i]);
}
double res_z = 0.0;
int rklevel_z = m_grid->Gather1DNeighborhood(p, 2, rklevel, negs);
int nume_z = rklevel_z * 2 + 1; // number of entries to average
for (int i = 0; i < nume_z; i++) {
res_z += kRKCoefficients[rklevel_z][i] * SampleImage(negs[i]);
}
return Vec3d(res_x, res_y, res_z);
}
inline bool IsGreater(INDEX_TYPE a, INDEX_TYPE b) const {
if (m_image[a] > m_image[b]) return true;
if (m_image[b] > m_image[a]) return false;
//if (a == b) printf("WHOA THERE NELLY\n");
return a > b;
}
//Vec3d IStep(const Vec3d& p, const Vec3d& grad, const FLOATTYPE h) const {
// return m_grid->Inbounds(p + (grad * h));
//}
//Vec3d IStepNoBoundaryCheck(const Vec3d& p, const Vec3d& grad, const FLOATTYPE h) const {
// return p + (grad * h);
//}
// add in block structure
void ComputeGradFromImage(int rklevel) {
m_grad = new Vec3d[m_grid->NumElements()];
m_i_made_gradient = true;
#pragma omp parallel for
for (int i = 0; i < m_grid->XYZ()[0]; i++) {
for (int j = 0; j < m_grid->XYZ()[1]; j++) {
for (int k = 0; k < m_grid->XYZ()[2]; k++) {
Vec3l p(i, j, k);
m_grad[m_grid->Index3d(p)] = GradientFromImage(p, rklevel);
}
}
}
}
void Negate() {
if (m_grad != NULL) {
#pragma omp parallel for schedule(static)
for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) {
this->m_image[i] *= -1;
this->m_grad[i] *= -1.0;
}
}
else {
#pragma omp parallel for schedule(static)
for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) {
this->m_image[i] *= -1;
}
}
}
};
};
#endif
|
sections-4.c | /* PR c++/24613 */
/* { dg-do compile } */
#pragma omp section /* { dg-error "may only be used in" } */
int i;
void
foo (void)
{
#pragma omp section /* { dg-error "may only be used in" } */
i++;
}
|
omp_parallel_sections_firstprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel sections firstprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel sections firstprivate</ompts:directive>
<ompts:dependences>omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_sections_firstprivate</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int sum;
int sum0;
</ompts:orphan:vars>
int known_sum;
sum = 7;
sum0 = 11;
<ompts:orphan>
#pragma omp parallel sections <ompts:check>firstprivate(sum0)</ompts:check><ompts:crosscheck>private(sum0)</ompts:crosscheck>
{
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
}
} /*end of parallel sections*/
</ompts:orphan>
known_sum=11*3+7;
return (known_sum==sum);
} /* end of check_section_firstprivate*/
</ompts:testcode>
</ompts:test>
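/*
   Minimal standalone sketch of the semantics under test (added commentary,
   not part of the validation-suite template above): firstprivate gives
   every thread its own copy of sum0, initialized from the value the
   variable had on entry to the parallel region, so each section adds 11
   into the shared sum; with plain private the copies start uninitialized,
   which is why the crosscheck variant is expected to fail.

   #include <stdio.h>
   int main(void)
   {
       int sum = 7, sum0 = 11;
   #pragma omp parallel sections firstprivate(sum0)
       {
   #pragma omp section
           {
   #pragma omp critical
               sum = sum + sum0;
           }
       }
       printf("sum = %d\n", sum); // 18 with the single section above
       return 0;
   }
*/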
|
ten_tusscher_2004_epi_S2_19.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include <stdint.h> // for uint32_t used below
#include <math.h>   // for exp/log/sqrt/pow used in RHS_cpu
#include "ten_tusscher_2004_epi_S2_19.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.4901203553897,0.00131177121243054,0.777786288577117,0.777647224146045,0.000176819820661720,0.484277066680072,0.00295670386892032,0.999998321645759,1.95882583997698e-08,1.91086959570826e-05,0.999769802784257,1.00742294542800,0.999998504302701,3.74218001174359e-05,1.41921088197810,10.0015161419689,139.208342414277};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
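// Layout note (added commentary): sv holds NEQ contiguous state variables
// per cell, so cell sv_id starts at sv + sv_id * NEQ. The parallel for is
// safe because each iteration touches only its own cell's state and its
// own stim_currents[i] entry.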
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
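// Integration note (added commentary): despite the rDY name, RHS_cpu
// returns next-state values rather than raw derivatives - the gates are
// advanced with the closed-form Rush-Larsen update and the remaining
// states with an explicit Euler step folded into the RHS - so copying
// rDY back into sv completes one step of size dt.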
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
//#ifdef EPI
real Gto=0.294;
//#endif
// #ifdef ENDO
// real Gto=0.073;
//#endif
//#ifdef MCELL
// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.1722612334159,0.000259112383736700,0.000154841929841439,0.000361218133317334,0.277128455649609,0.153642408006870,0.209381667465666,4.20509839372909,0.0199270314805181,1.58059649007092,1098.43907813844,0.000639220600349527,0.0905927390261824,0.0181442296796367,0.00430751059648478,1.23911116806789e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
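// Rush-Larsen update (added commentary): each gate y obeying
// dy/dt = (y_INF - y)/TAU_y is advanced exactly for frozen coefficients:
//   y(t+dt) = y_INF - (y_INF - y(t))*exp(-dt/TAU_y),
// a convex combination of y(t) and y_INF, so the gate stays in [0,1]
// and remains stable even for stiff gates at large dt.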
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
opal_test.c | // Copyright 2009-2021 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2021, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
extern "C"
void ariel_enable() { printf("Inside Ariel\n"); }
int main(int argc, char* argv[]) {
const int LENGTH = 2000;
ariel_enable();
printf("Allocating arrays of size %d elements.\n", LENGTH);
double* a = (double*) malloc(sizeof(double) * LENGTH);
double* b = (double*) malloc(sizeof(double) * LENGTH);
double* c = (double*) malloc(sizeof(double) * LENGTH);
printf("Done allocating arrays.\n");
int i;
for(i = 0; i < LENGTH; ++i) {
a[i] = i;
b[i] = LENGTH - i;
c[i] = 0;
}
printf("Perfoming the fast_c compute loop...\n");
// 'parallel for' splits the iterations between the two threads; a bare
// 'parallel' would run the whole loop in both threads and race on the
// shared index i
#pragma omp parallel for num_threads(2)
for(i = 0; i < LENGTH; ++i) {
//printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
c[i] = 2.0 * a[i] + 1.5 * b[i];
}
double sum = 0;
for(i = 0; i < LENGTH; ++i) {
sum += c[i];
}
printf("Sum of arrays is: %f\n", sum);
printf("Freeing arrays...\n");
free(a);
free(b);
free(c);
printf("Done.\n");
return 0;
}
|
resize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE %
% R R E SS I ZZ E %
% RRRR EEE SSS I ZZZ EEE %
% R R E SS I ZZ E %
% R R EEEEE SSSSS IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Image Resize Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#if defined(MAGICKCORE_LQR_DELEGATE)
#include <lqr.h>
#endif
/*
Typedef declarations.
*/
struct _ResizeFilter
{
double
(*filter)(const double,const ResizeFilter *),
(*window)(const double,const ResizeFilter *),
support, /* filter region of support - the filter support limit */
window_support, /* window support, usually equal to support (expert only) */
scale, /* dimension scaling to fit window support (usually 1.0) */
blur, /* x-scale (blur-sharpen) */
coefficient[7]; /* cubic coefficients for BC-cubic filters */
ResizeWeightingFunctionType
filterWeightingType,
windowWeightingType;
size_t
signature;
};
/*
Forward declarations.
*/
static double
I0(double x),
BesselOrderOne(double),
Sinc(const double, const ResizeFilter *),
SincFast(const double, const ResizeFilter *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F i l t e r F u n c t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% These are the various filter and windowing functions that are provided.
%
% They are internal to this module only. See AcquireResizeFilterInfo() for
% details of the access to these functions, via the GetResizeFilterSupport()
% and GetResizeFilterWeight() API interface.
%
% The individual filter functions have this format...
%
% static double FilterName(const double x,const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o x: the distance from the sampling point generally in the range of 0 to
% support. GetResizeFilterWeight() ensures this is a positive value.
%
% o resize_filter: current filter information. This allows function to
% access support, and possibly other pre-calculated information defining
% the functions.
%
*/
static double Blackman(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Blackman: 2nd order cosine windowing function:
0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x)
Refactored by Chantal Racette and Nicolas Robidoux to one trig call and
five flops, using cos(2 pi x) = 2 cos^2(pi x) - 1 so that
0.42 + 0.5 c + 0.08 (2 c^2 - 1) = 0.34 + c (0.5 + 0.16 c).
*/
const double cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.34+cosine*(0.5+cosine*0.16));
}
static double Bohman(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Bohman: 2nd order cosine windowing function:
(1-x) cos(pi x) + sin(pi x) / pi.
Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops,
taking advantage of the fact that the support of Bohman is 1.0 (so that we
know that sin(pi x) >= 0).
*/
const double cosine=cos((double) (MagickPI*x));
const double sine=sqrt(1.0-cosine*cosine);
magick_unreferenced(resize_filter);
return((1.0-x)*cosine+(1.0/MagickPI)*sine);
}
static double Box(const double magick_unused(x),
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(x);
magick_unreferenced(resize_filter);
/*
A Box filter is an equal weighting function (all weights equal).
DO NOT LIMIT results by support, or resize point sampling will not work,
as it requests points beyond its normal 0.0 support size.
*/
return(1.0);
}
static double Cosine(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
Cosine window function:
cos((pi/2)*x).
*/
return((double)cos((double) (MagickPI2*x)));
}
static double CubicBC(const double x,const ResizeFilter *resize_filter)
{
/*
Cubic Filters using B,C determined values:
Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter
Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears
Spline B = 1 C = 0 B-Spline Gaussian approximation
Hermite B = 0 C = 0 B-Spline interpolator
See paper by Mitchell and Netravali, Reconstruction Filters in Computer
Graphics Computer Graphics, Volume 22, Number 4, August 1988
http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/
Mitchell.pdf.
Coefficients are determined from B,C values:
P0 = ( 6 - 2*B )/6 = coeff[0]
P1 = 0
P2 = (-18 +12*B + 6*C )/6 = coeff[1]
P3 = ( 12 - 9*B - 6*C )/6 = coeff[2]
Q0 = ( 8*B +24*C )/6 = coeff[3]
Q1 = ( -12*B -48*C )/6 = coeff[4]
Q2 = ( 6*B +30*C )/6 = coeff[5]
Q3 = ( - 1*B - 6*C )/6 = coeff[6]
which are used to define the filter:
P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1
Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2
which ensures function is continuous in value and derivative (slope).
*/
if (x < 1.0)
return(resize_filter->coefficient[0]+x*(x*
(resize_filter->coefficient[1]+x*resize_filter->coefficient[2])));
if (x < 2.0)
return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x*
(resize_filter->coefficient[5]+x*resize_filter->coefficient[6])));
return(0.0);
}
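/*
  Worked example (added for illustration): for Mitchell-Netravali
  (B=C=1/3) the formulas above give P0=8/9, P2=-2, P3=7/6 and Q0=16/9,
  Q1=-10/3, Q2=2, Q3=-7/18, so the filter evaluates to 8/9 at x=0 and,
  as continuity requires, to 0 at x=2 (16/9 - 20/3 + 8 - 28/9 = 0).
*/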
static double CubicSpline(const double x,const ResizeFilter *resize_filter)
{
if (resize_filter->support <= 2.0)
{
/*
2-lobe Spline filter.
*/
if (x < 1.0)
return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0);
if (x < 2.0)
return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0));
return(0.0);
}
if (resize_filter->support <= 3.0)
{
/*
3-lobe Spline filter.
*/
if (x < 1.0)
return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0);
if (x < 2.0)
return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0));
if (x < 3.0)
return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0));
return(0.0);
}
/*
4-lobe Spline filter.
*/
if (x < 1.0)
return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0);
if (x < 2.0)
return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0));
if (x < 3.0)
return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0));
if (x < 4.0)
return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0));
return(0.0);
}
static double Gaussian(const double x,const ResizeFilter *resize_filter)
{
/*
Gaussian with a sigma = 1/2 (or as user specified)
Gaussian Formula (1D) ...
exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2))
Gaussian Formula (2D) ...
exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
or for radius
exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) )
Note that the only change from the 1-D to the radial form is in the
normalization multiplier, which is not needed or used when Gaussian is
used as a filter.
The constants are pre-calculated...
coeff[0]=sigma;
coeff[1]=1.0/(2.0*sigma^2);
coeff[2]=1.0/(sqrt(2*PI)*sigma^2);
exp( -coeff[1]*(x^2)) ) * coeff[2];
However, only the multiplier coeff[1] is needed; the others are informative only.
This separates the gaussian 'sigma' value from the 'blur/support'
settings allowing for its use in special 'small sigma' gaussians,
without the filter 'missing' pixels because the support becomes too
small.
*/
return(exp((double)(-resize_filter->coefficient[1]*x*x)));
}
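/*
  Example (added for illustration): with the default sigma = 1/2,
  coefficient[1] = 1/(2*sigma^2) = 2, so the weight is exp(-2*x*x); at
  the default support of 1.5 (i.e. 3*sigma) the tail has already decayed
  to exp(-4.5), roughly 0.011.
*/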
static double Hann(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Cosine window function:
0.5+0.5*cos(pi*x).
*/
const double cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.5+0.5*cosine);
}
static double Hamming(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
/*
Offset cosine window function:
.54 + .46 cos(pi x).
*/
const double cosine=cos((double) (MagickPI*x));
magick_unreferenced(resize_filter);
return(0.54+0.46*cosine);
}
static double Jinc(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions.
http://mathworld.wolfram.com/JincFunction.html and page 11 of
http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf
The original "zoom" program by Paul Heckbert called this "Bessel". But
really it is more accurately named "Jinc".
*/
if (x == 0.0)
return(0.5*MagickPI);
return(BesselOrderOne(MagickPI*x)/x);
}
static double Kaiser(const double x,const ResizeFilter *resize_filter)
{
/*
Kaiser Windowing Function (bessel windowing)
I0( beta * sqrt( 1-x^2) ) / I0(beta)
Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5).
However it is typically defined in terms of Alpha*PI
The normalization factor (coeff[1]) is not actually needed,
but without it the filter has a large value at x=0, making it
difficult to compare the function with other windowing functions.
*/
return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]*
sqrt((double) (1.0-x*x))));
}
static double Lagrange(const double x,const ResizeFilter *resize_filter)
{
double
value;
register ssize_t
i;
ssize_t
n,
order;
/*
Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange
function and depends on the overall support window size of the filter. That
is: for a support of 2, it gives a lagrange-4 (piecewise cubic function).
"n" identifies the piece of the piecewise polynomial.
See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging,
Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064.
*/
if (x > resize_filter->support)
return(0.0);
order=(ssize_t) (2.0*resize_filter->window_support); /* number of pieces */
n=(ssize_t) (resize_filter->window_support+x);
value=1.0f;
for (i=0; i < order; i++)
if (i != n)
value*=(n-i-x)/(n-i);
return(value);
}
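/*
  Sanity check (added for illustration): with window_support = 2.0 the
  loop uses order = 4 pieces; at the node x = 0 (so n = 2) every factor
  (n-i-x)/(n-i) equals 1 and the weight is exactly 1, confirming the
  interpolatory property of the Lagrange fit.
*/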
static double Quadratic(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
2nd order (quadratic) B-Spline approximation of Gaussian.
*/
if (x < 0.5)
return(0.75-x*x);
if (x < 1.5)
return(0.5*(x-1.5)*(x-1.5));
return(0.0);
}
static double Sinc(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
Scaled sinc(x) function using a trig call:
sinc(x) == sin(pi x)/(pi x).
*/
if (x != 0.0)
{
const double alpha=(double) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
return((double) 1.0);
}
static double SincFast(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
Approximations of the sinc function sin(pi x)/(pi x) over the interval
[-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding
from the Natural Sciences and Engineering Research Council of Canada.
Although the approximations are polynomials (for low order of
approximation) and quotients of polynomials (for higher order of
approximation) and consequently are similar in form to Taylor polynomials /
Pade approximants, the approximations are computed with a completely
different technique.
Summary: These approximations are "the best" in terms of bang (accuracy)
for the buck (flops). More specifically: Among the polynomial quotients
that can be computed using a fixed number of flops (with a given "+ - * /
budget"), the chosen polynomial quotient is the one closest to the
approximated function with respect to maximum absolute relative error over
the given interval.
The Remez algorithm, as implemented in the boost library's minimax package,
is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html
If outside of the interval of approximation, use the standard trig formula.
*/
if (x > 4.0)
{
const double alpha=(double) (MagickPI*x);
return(sin((double) alpha)/alpha);
}
{
/*
The approximations only depend on x^2 (sinc is an even function).
*/
const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
/*
Maximum absolute relative error 6.3e-6 < 1/2^17.
*/
const double c0 = 0.173610016489197553621906385078711564924e-2L;
const double c1 = -0.384186115075660162081071290162149315834e-3L;
const double c2 = 0.393684603287860108352720146121813443561e-4L;
const double c3 = -0.248947210682259168029030370205389323899e-5L;
const double c4 = 0.107791837839662283066379987646635416692e-6L;
const double c5 = -0.324874073895735800961260474028013982211e-8L;
const double c6 = 0.628155216606695311524920882748052490116e-10L;
const double c7 = -0.586110644039348333520104379959307242711e-12L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
/*
Max. abs. rel. error 2.2e-8 < 1/2^25.
*/
const double c0 = 0.173611107357320220183368594093166520811e-2L;
const double c1 = -0.384240921114946632192116762889211361285e-3L;
const double c2 = 0.394201182359318128221229891724947048771e-4L;
const double c3 = -0.250963301609117217660068889165550534856e-5L;
const double c4 = 0.111902032818095784414237782071368805120e-6L;
const double c5 = -0.372895101408779549368465614321137048875e-8L;
const double c6 = 0.957694196677572570319816780188718518330e-10L;
const double c7 = -0.187208577776590710853865174371617338991e-11L;
const double c8 = 0.253524321426864752676094495396308636823e-13L;
const double c9 = -0.177084805010701112639035485248501049364e-15L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
/*
Max. abs. rel. error 1.2e-12 < 1/2^39.
*/
const double c0 = 0.173611111110910715186413700076827593074e-2L;
const double c1 = -0.289105544717893415815859968653611245425e-3L;
const double c2 = 0.206952161241815727624413291940849294025e-4L;
const double c3 = -0.834446180169727178193268528095341741698e-6L;
const double c4 = 0.207010104171026718629622453275917944941e-7L;
const double c5 = -0.319724784938507108101517564300855542655e-9L;
const double c6 = 0.288101675249103266147006509214934493930e-11L;
const double c7 = -0.118218971804934245819960233886876537953e-13L;
const double p =
c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
const double d0 = 1.0L;
const double d1 = 0.547981619622284827495856984100563583948e-1L;
const double d2 = 0.134226268835357312626304688047086921806e-2L;
const double d3 = 0.178994697503371051002463656833597608689e-4L;
const double d4 = 0.114633394140438168641246022557689759090e-6L;
const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
}
}
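/*
  Design note (added commentary): the common factor
  (xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0) vanishes at x = 1, 2, 3, 4, so the
  approximation reproduces the zero crossings of sinc exactly on the
  interval of approximation; the fitted polynomial (or rational) part only
  has to capture the remaining smooth factor.
*/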
static double Triangle(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or
a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function
for Sinc().
*/
if (x < 1.0)
return(1.0-x);
return(0.0);
}
static double Welch(const double x,
const ResizeFilter *magick_unused(resize_filter))
{
magick_unreferenced(resize_filter);
/*
Welch parabolic windowing filter.
*/
if (x < 1.0)
return(1.0-x*x);
return(0.0);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResizeFilter() allocates the ResizeFilter structure. Choose from
% these filters:
%
% FIR (Finite Impulse Response) Filters
% Box Triangle Quadratic
% Spline Hermite Catrom
% Mitchell
%
% IIR (Infinite Impulse Response) Filters
% Gaussian Sinc Jinc (Bessel)
%
% Windowed Sinc/Jinc Filters
% Blackman Bohman Lanczos
% Hann Hamming Cosine
% Kaiser Welch Parzen
% Bartlett
%
% Special Purpose Filters
% Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp
% Robidoux RobidouxSharp
%
% The user's "-filter" selection is used to look up the default 'expert'
% settings for that filter from an internal table. However any provided
% 'expert' settings (see below) may override this selection.
%
% FIR filters are used as-is, and are limited to that filter's support window
% (unless overridden). 'Gaussian', while classed as an IIR filter, is also
% simply clipped by its support size (currently 1.5, or approximately 3*sigma,
% as recommended by many references).
%
% The special 'cylindrical' filter flag will promote the default 4-lobed
% Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
% suited to this style of image resampling. This typically happens when using
% such a filter for image distortions.
%
% SPECIFIC FILTERS:
%
% Directly requesting 'Sinc', 'Jinc' function as a filter will force the use
% of function without any windowing, or promotion for cylindrical usage. This
% is not recommended, except by image processing experts, especially as part
% of expert option filter function selection.
%
% Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is
% computed using the traditional sin(pi*x)/(pi*x); it is selected if the user
% specifically specifies the use of a Sinc filter. SincFast uses highly
% accurate (and fast) polynomial (low Q) and rational (high Q) approximations,
% and will be used by default in most cases.
%
% The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted
% to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use).
% The Sinc version is the most popular windowed filter.
%
% LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of
% the Lanczos filter, specifically designed for EWA distortion (as a
% Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos
% (Sinc-Sinc) filter. The chosen blur value comes as close as possible to
% satisfying the following condition without changing the character of the
% corresponding EWA filter:
%
% 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with
% only vertical or horizontal features are preserved when performing a 'no-op'
% with EWA distortion.
%
% The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos
% filters. The 'sharp' version uses a blur factor of 0.9549963639785485,
% again chosen because the resulting EWA filter comes as close as possible to
% satisfying the above condition.
%
% Robidoux is another filter tuned for EWA. It is the Keys cubic filter
% defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op'
% Vertical and Horizontal Line Preservation Condition" exactly, and it
% moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns
% out to be close to both Mitchell and Lanczos2Sharp. For example, its first
% crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the
% first crossing of Mitchell and Lanczos2Sharp.
%
% RobidouxSharp is a slightly sharper version of Robidoux; some believe it
% is too sharp. It is designed to minimize the maximum possible change in
% a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op
% conditions. Amazingly, Mitchell falls roughly between Robidoux and
% RobidouxSharp, though this seems to have been pure coincidence.
%
% 'EXPERT' OPTIONS:
%
% These artifact "defines" are not recommended for production use without
% expert knowledge of resampling, filtering, and the effects they have on the
% resulting resampled (resized or distorted) image.
%
% They can be used to override any and all filter default, and it is
% recommended you make good use of "filter:verbose" to make sure that the
% overall effect of your selection (before and after) is as expected.
%
% "filter:verbose" controls whether to output the exact results of the
% filter selections made, as well as plotting data for graphing the
% resulting filter over the filters support range.
%
% "filter:filter" select the main function associated with this filter
% name, as the weighting function of the filter. This can be used to
% set a windowing function as a weighting function, for special
% purposes, such as graphing.
%
% If a "filter:window" operation has not been provided, a 'Box'
% windowing function will be set to denote that no windowing function is
% being used.
%
% "filter:window" Select this windowing function for the filter. While any
% filter could be used as a windowing function, using the 'first lobe' of
% that filter over the whole support window, using a non-windowing
% function is not advisible. If no weighting filter function is specified
% a 'SincFast' filter is used.
%
% "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. This a
% simpler method of setting filter support size that will correctly
% handle the Sinc/Jinc switch for an operators filtering requirements.
% Only integers should be given.
%
% "filter:support" Set the support size for filtering to the size given.
% This is not recommended for Sinc/Jinc windowed filters (lobes should be
% used instead). This will override any 'filter:lobes' option.
%
% "filter:win-support" Scale windowing function to this size instead. This
% causes the windowing (or self-windowing Lagrange filter) to act as if
% the support window is much larger than what is actually supplied to the
% calling operator. The filter, however, is still clipped to the real
% support size given, by the support range supplied to the caller.
% If unset, this will equal the normal filter support size.
%
% "filter:blur" Scale the filter and support window by this amount. A value
% of > 1 will generally result in a more blurred image with more ringing
% effects, while a value <1 will sharpen the resulting image with more
% aliasing effects.
%
% "filter:sigma" The sigma value to use for the Gaussian filter only.
% Defaults to '1/2'. Using a different sigma effectively provides a
% method of using the filter as a 'blur' convolution. Particularly when
% using it for Distort.
%
% "filter:b"
% "filter:c" Override the preset B,C values for a Cubic filter.
% If only one of these is given, the other is assumed so as to produce a
% 'Keys' type of filter, such that B+2C=1, where the Keys 'alpha' value = C.
%
% Examples:
%
% Set a true un-windowed Sinc filter with 8 lobes (very slow):
% -define filter:filter=Sinc
% -define filter:lobes=8
%
% Set an 8 lobe Lanczos (Sinc or Jinc) filter:
% -filter Lanczos
% -define filter:lobes=8
%
% The format of the AcquireResizeFilter method is:
%
% ResizeFilter *AcquireResizeFilter(const Image *image,
% const FilterType filter_type,const MagickBooleanType cylindrical,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filter: the filter type, defining a preset filter, window and support.
% The artifact settings listed above will override those selections.
%
% o cylindrical: use a 1D orthogonal filter (Sinc) when MagickFalse, or a
% 2D cylindrical (radial) filter (Jinc) when MagickTrue. The image
% artifact "filter:blur" will override the internal blur setting,
% including any internal change (such as for cylindrical usage).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
const FilterType filter,const MagickBooleanType cylindrical,
ExceptionInfo *exception)
{
const char
*artifact;
FilterType
filter_type,
window_type;
double
B,
C,
value;
register ResizeFilter
*resize_filter;
/*
Table mapping a given Filter into Weighting and Windowing functions.
A 'Box' windowing function means it is a simple non-windowed filter.
A 'SincFast' filter function could be upgraded to a 'Jinc' filter if
"cylindrical" usage is requested, unless a 'Sinc' or 'SincFast' filter was
specifically requested by the user.
WARNING: The order of this table must match the order of the FilterType
enumeration specified in "resample.h", or the filter names will not match
the filter being setup.
You can check filter setups with the "filter:verbose" expert setting.
*/
static struct
{
FilterType
filter,
window;
} const mapping[SentinelFilter] =
{
{ UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */
{ PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */
{ BoxFilter, BoxFilter }, /* Box averaging filter */
{ TriangleFilter, BoxFilter }, /* Linear interpolation filter */
{ HermiteFilter, BoxFilter }, /* Hermite interpolation filter */
{ SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */
{ SincFastFilter, HammingFilter }, /* Hamming -- '' variation */
{ SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */
{ GaussianFilter, BoxFilter }, /* Gaussian blur filter */
{ QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */
{ CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */
{ CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */
{ MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */
{ JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */
{ SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */
{ SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */
{ SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */
{ LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */
{ SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */
{ SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */
{ SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */
{ LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */
{ LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */
{ LanczosSharpFilter, LanczosSharpFilter }, /* | these require */
{ Lanczos2Filter, Lanczos2Filter }, /* | special handling */
{ Lanczos2SharpFilter, Lanczos2SharpFilter },
{ RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */
{ RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */
{ LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */
{ SplineFilter, BoxFilter }, /* Spline Cubic Filter */
{ LanczosRadiusFilter, LanczosFilter }, /* Lanczos with integer radius */
{ CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */
};
/*
Table mapping the filter/window from the above table to an actual function.
The default support size for each filter as a weighting function, and the
range to scale with to use that function as a sinc windowing function
(typically 1.0).
Note that the filter_type -> function is 1 to 1 except for Sinc(),
SincFast(), and CubicBC() functions, which may have multiple filter to
function associations.
See "filter:verbose" handling below for the function -> filter mapping.
*/
static struct
{
double
(*function)(const double,const ResizeFilter*),
support, /* Default lobes/support size of the weighting filter. */
scale, /* Support when function used as a windowing function
Typically equal to the location of the first zero crossing. */
B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */
ResizeWeightingFunctionType weightingFunctionType;
} const filters[SentinelFilter] =
{
/* .--- support window (if used as a Weighting Function)
| .--- first crossing (if used as a Windowing Function)
| | .--- B value for Cubic Function
| | | .---- C value for Cubic Function
| | | | */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */
{ Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */
{ Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */
{ CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */
{ Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */
{ Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */
{ Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */
{ Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */
{ Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */
{ CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */
{ CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */
{ Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
{ Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */
{ SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
{ Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */
{ Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */
{ Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */
{ Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */
{ Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */
{ SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */
/* Robidoux: Keys cubic close to Lanczos2D sharpened */
{ CubicBC, 2.0, 1.1685777620836932,
0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction },
/* RobidouxSharp: Sharper version of Robidoux */
{ CubicBC, 2.0, 1.105822933719019,
0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction },
{ Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */
{ CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */
{ SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius */
{ CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */
};
/*
The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
function being used as a filter. It is used by the "filter:lobes" expert
setting and for 'lobes' for Jinc functions in the previous table. This way
users do not have to deal with the highly irrational lobe sizes of the Jinc
filter.
Values taken from
http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
using Jv-function with v=1, then dividing by PI.
*/
static double
jinc_zeros[16] =
{
1.2196698912665045,
2.2331305943815286,
3.2383154841662362,
4.2410628637960699,
5.2427643768701817,
6.2439216898644877,
7.2447598687199570,
8.2453949139520427,
9.2458926849494673,
10.246293348754916,
11.246622794877883,
12.246898461138105,
13.247132522181061,
14.247333735806849,
15.247508563037300,
16.247661874700962
};
/*
Allocate resize filter.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(UndefinedFilter < filter && filter < SentinelFilter);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
(void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
/*
Defaults for the requested filter.
*/
filter_type=mapping[filter].filter;
window_type=mapping[filter].window;
resize_filter->blur=1.0;
/* Promote 1D Windowed Sinc filters to 2D Windowed Jinc filters */
if ( cylindrical != MagickFalse && (filter_type == SincFastFilter) &&
(filter != SincFastFilter))
filter_type=JincFilter; /* 1D Windowed Sinc => 2D Windowed Jinc filters */
/* Expert filter setting override */
artifact=GetImageArtifact(image,"filter:filter");
if (IsStringTrue(artifact) != MagickFalse)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{ /* Raw filter request - no window function. */
filter_type=(FilterType) option;
window_type=BoxFilter;
}
/* Filter override with a specific window function. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
window_type=(FilterType) option;
}
}
else
{
/* Window specified, but no filter function? Assume Sinc/Jinc. */
artifact=GetImageArtifact(image,"filter:window");
if (artifact != (const char *) NULL)
{
ssize_t
option;
option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
if ((UndefinedFilter < option) && (option < SentinelFilter))
{
filter_type= cylindrical != MagickFalse ? JincFilter
: SincFastFilter;
window_type=(FilterType) option;
}
}
}
/* Assign the real functions to use for the filters selected. */
resize_filter->filter=filters[filter_type].function;
resize_filter->support=filters[filter_type].support;
resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
resize_filter->window=filters[window_type].function;
resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
resize_filter->scale=filters[window_type].scale;
resize_filter->signature=MagickCoreSignature;
/* Filter Modifications for orthogonal/cylindrical usage */
if (cylindrical != MagickFalse)
switch (filter_type)
{
case BoxFilter:
/* Support for Cylindrical Box should be sqrt(2)/2 */
resize_filter->support=(double) MagickSQ1_2;
break;
case LanczosFilter:
case LanczosSharpFilter:
case Lanczos2Filter:
case Lanczos2SharpFilter:
case LanczosRadiusFilter:
resize_filter->filter=filters[JincFilter].function;
resize_filter->window=filters[JincFilter].function;
resize_filter->scale=filters[JincFilter].scale;
/* number of lobes (support window size) remains unchanged */
break;
default:
break;
}
/* Global Sharpening (regardless of orthogonal/cylindrical) */
switch (filter_type)
{
case LanczosSharpFilter:
resize_filter->blur *= 0.9812505644269356;
break;
case Lanczos2SharpFilter:
resize_filter->blur *= 0.9549963639785485;
break;
/* case LanczosRadius: blur adjust is done after lobes */
default:
break;
}
/*
Expert Option Modifications.
*/
/* User Gaussian Sigma Override - no support change */
if ((resize_filter->filter == Gaussian) ||
(resize_filter->window == Gaussian) ) {
value=0.5; /* gaussian sigma default, half pixel */
artifact=GetImageArtifact(image,"filter:sigma");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
/* Define coefficients for Gaussian */
resize_filter->coefficient[0]=value; /* note sigma too */
resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
/* normalization - not actually needed or used! */
if ( value > 0.5 )
resize_filter->support *= 2*value; /* increase support linearly */
}
/* User Kaiser Alpha Override - no support change */
if ((resize_filter->filter == Kaiser) ||
(resize_filter->window == Kaiser) ) {
value=6.5; /* default beta value for Kaiser bessel windowing function */
artifact=GetImageArtifact(image,"filter:alpha"); /* FUTURE: depreciate */
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-beta");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"filter:kaiser-alpha");
if (artifact != (const char *) NULL)
value=StringToDouble(artifact,(char **) NULL)*MagickPI;
/* Define coefficients for Kaiser Windowing Function */
resize_filter->coefficient[0]=value; /* alpha */
resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
/* normalization */
}
/* Support Overrides */
artifact=GetImageArtifact(image,"filter:lobes");
if (artifact != (const char *) NULL)
{
ssize_t
lobes;
lobes=(ssize_t) StringToLong(artifact);
if (lobes < 1)
lobes=1;
resize_filter->support=(double) lobes;
}
if (resize_filter->filter == Jinc)
{
/*
Convert a Jinc function lobes value to a real support value.
*/
if (resize_filter->support > 16)
resize_filter->support=jinc_zeros[15]; /* largest entry in table */
else
resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
/*
Blur this filter so support is an integer value (lobes dependent).
*/
if (filter_type == LanczosRadiusFilter)
resize_filter->blur*=floor(resize_filter->support)/
resize_filter->support;
}
/*
Expert blur override.
*/
artifact=GetImageArtifact(image,"filter:blur");
if (artifact != (const char *) NULL)
resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
if (resize_filter->blur < MagickEpsilon)
resize_filter->blur=(double) MagickEpsilon;
/*
Expert override of the support setting.
*/
artifact=GetImageArtifact(image,"filter:support");
if (artifact != (const char *) NULL)
resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Scale windowing function separately to the support 'clipping' window
that the calling operator is planning to actually use. (Expert override)
*/
resize_filter->window_support=resize_filter->support; /* default */
artifact=GetImageArtifact(image,"filter:win-support");
if (artifact != (const char *) NULL)
resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
/*
Adjust window function scaling to match windowing support for weighting
function. This avoids a division on every filter call.
*/
resize_filter->scale/=resize_filter->window_support;
/*
* Set Cubic Spline B,C values, calculate Cubic coefficients.
*/
B=0.0;
C=0.0;
if ((resize_filter->filter == CubicBC) ||
(resize_filter->window == CubicBC) )
{
B=filters[filter_type].B;
C=filters[filter_type].C;
if (filters[window_type].function == CubicBC)
{
B=filters[window_type].B;
C=filters[window_type].C;
}
artifact=GetImageArtifact(image,"filter:b");
if (artifact != (const char *) NULL)
{
B=StringToDouble(artifact,(char **) NULL);
C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
artifact=GetImageArtifact(image,"filter:c"); /* user C override */
if (artifact != (const char *) NULL)
C=StringToDouble(artifact,(char **) NULL);
}
else
{
artifact=GetImageArtifact(image,"filter:c");
if (artifact != (const char *) NULL)
{
C=StringToDouble(artifact,(char **) NULL);
B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
}
}
{
const double
twoB = B+B;
/*
Convert B,C values into Cubic Coefficients. See CubicBC().
*/
resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
resize_filter->coefficient[1]=-3.0+twoB+C;
resize_filter->coefficient[2]=2.0-1.5*B-C;
resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
resize_filter->coefficient[4]=-8.0*C-twoB;
resize_filter->coefficient[5]=B+5.0*C;
resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
}
}
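/*
  Example (added for illustration): "-define filter:c=0.5" alone selects
  B = 1-2*0.5 = 0, C = 1/2, i.e. the Catmull-Rom filter listed in the
  CubicBC() table above.
*/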
/*
Expert Option Request for verbose details of the resulting filter.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp master
{
#endif
if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
{
double
support,
x;
/*
Set the weighting function properly when the weighting function
may not exactly match the filter of the same name. E.g., a Point
filter really uses a Box weighting function with a different
support than is typically used.
*/
if (resize_filter->filter == Box) filter_type=BoxFilter;
if (resize_filter->filter == Sinc) filter_type=SincFilter;
if (resize_filter->filter == SincFast) filter_type=SincFastFilter;
if (resize_filter->filter == Jinc) filter_type=JincFilter;
if (resize_filter->filter == CubicBC) filter_type=CubicFilter;
if (resize_filter->window == Box) window_type=BoxFilter;
if (resize_filter->window == Sinc) window_type=SincFilter;
if (resize_filter->window == SincFast) window_type=SincFastFilter;
if (resize_filter->window == Jinc) window_type=JincFilter;
if (resize_filter->window == CubicBC) window_type=CubicFilter;
/*
Report Filter Details.
*/
support=GetResizeFilterSupport(resize_filter); /* practical_support */
(void) FormatLocaleFile(stdout,
"# Resampling Filter (for graphing)\n#\n");
(void) FormatLocaleFile(stdout,"# filter = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,filter_type));
(void) FormatLocaleFile(stdout,"# window = %s\n",
CommandOptionToMnemonic(MagickFilterOptions,window_type));
(void) FormatLocaleFile(stdout,"# support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->support);
(void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
GetMagickPrecision(),(double) resize_filter->window_support);
(void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
GetMagickPrecision(),(double)resize_filter->blur);
if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
(void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
if ( filter_type == KaiserFilter || window_type == KaiserFilter )
(void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
GetMagickPrecision(),(double)resize_filter->coefficient[0]);
(void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
GetMagickPrecision(), (double)support);
if ( filter_type == CubicFilter || window_type == CubicFilter )
(void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
GetMagickPrecision(),(double)B, GetMagickPrecision(),(double)C);
(void) FormatLocaleFile(stdout,"\n");
/*
Output values of resulting filter graph -- for graphing filter result.
*/
for (x=0.0; x <= support; x+=0.01f)
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
GetMagickPrecision(),(double)
GetResizeFilterWeight(resize_filter,x));
/*
A final value so gnuplot can graph the 'stop' properly.
*/
(void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
GetMagickPrecision(),0.0);
}
/* Output the above once only for each image - remove setting */
(void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
}
#endif
return(resize_filter);
}
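/*
  Example (a sketch, internal to MagickCore since AcquireResizeFilter() is
  private): enable the verbose report above programmatically by setting the
  "filter:verbose" artifact before acquiring the filter:

    (void) SetImageArtifact(image,"filter:verbose","true");
    resize_filter=AcquireResizeFilter(image,LanczosFilter,MagickFalse,
      exception);
    ...
    resize_filter=DestroyResizeFilter(resize_filter);

  The tab-separated (x, weight) pairs printed to stdout graph directly in
  gnuplot.
*/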
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveResizeImage() adaptively resizes the image with pixel resampling.
%
% This is a shortcut function for a fast interpolative resize using mesh
% interpolation. It works well for small resizes of less than +/- 50%
% of the original image size. For larger resizes a full filtered (and
% slower) resize function should be used instead.
%
% The format of the AdaptiveResizeImage method is:
%
% Image *AdaptiveResizeImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
Image
*resize_image;
resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
exception);
return(resize_image);
}
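/*
  Usage (a minimal sketch; assumes MagickCoreGenesis() has been called,
  `image' was read with ReadImage(), and `exception' came from
  AcquireExceptionInfo()):

    Image
      *resized;

    resized=AdaptiveResizeImage(image,image->columns*3/4,image->rows*3/4,
      exception);
    if (resized != (Image *) NULL)
      resized=DestroyImage(resized);
*/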
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ B e s s e l O r d e r O n e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BesselOrderOne() computes the Bessel function of x of the first kind of
% order 1. This is used to create the Jinc() filter function below.
%
% Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%    j1(x) = x*J1(x)  (where J1() is the rational approximation below);
%
% For x in (8,inf)
%
% j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
% where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
% cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
% = 1/sqrt(2) * (sin(x) - cos(x))
% sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
% = -1/sqrt(2) * (sin(x) + cos(x))
%
% The format of the BesselOrderOne method is:
%
% double BesselOrderOne(double x)
%
% A description of each parameter follows:
%
% o x: double value.
%
*/
#undef I0
static double I0(double x)
{
double
sum,
t,
y;
register ssize_t
i;
/*
Modified Bessel function of the first kind, order zero.
*/
sum=1.0;
y=x*x/4.0;
t=y;
for (i=2; t > MagickEpsilon; i++)
{
sum+=t;
t*=y/((double) i*i);
}
return(sum);
}
#undef J1
static double J1(double x)
{
double
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.581199354001606143928050809e+21,
-0.6672106568924916298020941484e+20,
0.2316433580634002297931815435e+19,
-0.3588817569910106050743641413e+17,
0.2908795263834775409737601689e+15,
-0.1322983480332126453125473247e+13,
0.3413234182301700539091292655e+10,
-0.4695753530642995859767162166e+7,
0.270112271089232341485679099e+4
},
Qone[] =
{
0.11623987080032122878585294e+22,
0.1185770712190320999837113348e+20,
0.6092061398917521746105196863e+17,
0.2081661221307607351240184229e+15,
0.5243710262167649715406728642e+12,
0.1013863514358673989967045588e+10,
0.1501793594998585505921097578e+7,
0.1606931573481487801970916749e+4,
0.1e+1
};
p=Pone[8];
q=Qone[8];
for (i=7; i >= 0; i--)
{
p=p*x*x+Pone[i];
q=q*x*x+Qone[i];
}
return(p/q);
}
#undef P1
static double P1(double x)
{
double
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.352246649133679798341724373e+5,
0.62758845247161281269005675e+5,
0.313539631109159574238669888e+5,
0.49854832060594338434500455e+4,
0.2111529182853962382105718e+3,
0.12571716929145341558495e+1
},
Qone[] =
{
0.352246649133679798068390431e+5,
0.626943469593560511888833731e+5,
0.312404063819041039923015703e+5,
0.4930396490181088979386097e+4,
0.2030775189134759322293574e+3,
0.1e+1
};
p=Pone[5];
q=Qone[5];
for (i=4; i >= 0; i--)
{
p=p*(8.0/x)*(8.0/x)+Pone[i];
q=q*(8.0/x)*(8.0/x)+Qone[i];
}
return(p/q);
}
#undef Q1
static double Q1(double x)
{
double
p,
q;
register ssize_t
i;
static const double
Pone[] =
{
0.3511751914303552822533318e+3,
0.7210391804904475039280863e+3,
0.4259873011654442389886993e+3,
0.831898957673850827325226e+2,
0.45681716295512267064405e+1,
0.3532840052740123642735e-1
},
Qone[] =
{
0.74917374171809127714519505e+4,
0.154141773392650970499848051e+5,
0.91522317015169922705904727e+4,
0.18111867005523513506724158e+4,
0.1038187585462133728776636e+3,
0.1e+1
};
p=Pone[5];
q=Qone[5];
for (i=4; i >= 0; i--)
{
p=p*(8.0/x)*(8.0/x)+Pone[i];
q=q*(8.0/x)*(8.0/x)+Qone[i];
}
return(p/q);
}
static double BesselOrderOne(double x)
{
double
p,
q;
if (x == 0.0)
return(0.0);
p=x;
if (x < 0.0)
x=(-x);
if (x < 8.0)
return(p*J1(x));
q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
cos((double) x))));
if (p < 0.0)
q=(-q);
return(q);
}
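/*
  Sanity check (a sketch): for small arguments the series expansion gives
  j1(x) ~= x/2 - x^3/16, so BesselOrderOne(0.01) should agree with 0.005
  to better than 1.0e-7, e.g.:

    assert(fabs(BesselOrderOne(0.01)-0.005) < 1.0e-7);
*/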
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y R e s i z e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResizeFilter() destroys the resize filter.
%
% The format of the DestroyResizeFilter method is:
%
% ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
% o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
resize_filter->signature=(~MagickCoreSignature);
resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
return(resize_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r S u p p o r t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterSupport() returns the current support window size for this
% filter.  Note that this may have been enlarged by the filter:blur factor.
%
% The format of the GetResizeFilterSupport method is:
%
% double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
% A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate double *GetResizeFilterCoefficient(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return((double *) resize_filter->coefficient);
}
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->blur);
}
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->scale);
}
MagickPrivate double GetResizeFilterWindowSupport(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->window_support);
}
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->filterWeightingType);
}
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->windowWeightingType);
}
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
return(resize_filter->support*resize_filter->blur);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t R e s i z e F i l t e r W e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetResizeFilterWeight evaluates the specified resize filter at the point x,
% which usually lies between zero and the filter's current 'support', and
% returns the weight of the filter function at that point.
%
% The format of the GetResizeFilterWeight method is:
%
% double GetResizeFilterWeight(const ResizeFilter *resize_filter,
% const double x)
%
% A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
% o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
const double x)
{
double
scale,
weight,
x_blur;
/*
Windowing function - scale the weighting filter by this amount.
*/
assert(resize_filter != (ResizeFilter *) NULL);
assert(resize_filter->signature == MagickCoreSignature);
x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */
if ((resize_filter->window_support < MagickEpsilon) ||
(resize_filter->window == Box))
scale=1.0; /* Point or Box Filter -- avoid division by zero */
else
{
scale=resize_filter->scale;
scale=resize_filter->window(x_blur*scale,resize_filter);
}
weight=scale*resize_filter->filter(x_blur,resize_filter);
return(weight);
}
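/*
  Example (a sketch, internal to MagickCore since these methods are
  MagickPrivate): tabulate the effective kernel exactly as the
  "filter:verbose" report does:

    double
      support = GetResizeFilterSupport(resize_filter),
      x;

    for (x=0.0; x <= support; x+=0.1)
      (void) FormatLocaleFile(stdout,"%g\t%g\n",x,
        GetResizeFilterWeight(resize_filter,x));
*/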
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p o l a t i v e R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpolativeResizeImage() resizes an image using the specified
% interpolation method.
%
% The format of the InterpolativeResizeImage method is:
%
% Image *InterpolativeResizeImage(const Image *image,const size_t columns,
% const size_t rows,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the resized image.
%
% o rows: the number of rows in the resized image.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
const size_t columns,const size_t rows,const PixelInterpolateMethod method,
ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
Image
*resize_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
scale;
ssize_t
y;
/*
Interpolatively resize image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
{
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
scale.x=(double) image->columns/resize_image->columns;
scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
PointInfo
offset;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if (q == (Quantum *) NULL)
continue;
offset.y=((double) y+0.5)*scale.y-0.5;
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(resize_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
resize_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
resize_traits=GetPixelChannelTraits(resize_image,channel);
if ((traits == UndefinedPixelTrait) ||
(resize_traits == UndefinedPixelTrait))
continue;
offset.x=((double) x+0.5)*scale.x-0.5;
status=InterpolatePixelChannels(image,image_view,resize_image,method,
offset.x,offset.y,q,exception);
if (status == MagickFalse)
break;
}
q+=GetPixelChannels(resize_image);
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_InterpolativeResizeImage)
#endif
proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
resize_image=DestroyImage(resize_image);
return(resize_image);
}
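/*
  Usage (a minimal sketch): a fast bilinear resize, trading quality for
  speed relative to the fully filtered ResizeImage():

    Image
      *fast;

    fast=InterpolativeResizeImage(image,640,480,BilinearInterpolatePixel,
      exception);
*/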
#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i q u i d R e s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LiquidRescaleImage() rescales image with seam carving.
%
% The format of the LiquidRescaleImage method is:
%
% Image *LiquidRescaleImage(const Image *image,const size_t columns,
% const size_t rows,const double delta_x,const double rigidity,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the rescaled image.
%
% o rows: the number of rows in the rescaled image.
%
% o delta_x: maximum seam transversal step (0 means straight seams).
%
% o rigidity: introduce a bias for non-straight seams (typically 0).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
const size_t rows,const double delta_x,const double rigidity,
ExceptionInfo *exception)
{
#define LiquidRescaleImageTag "Rescale/Image"
CacheView
*image_view,
*rescale_view;
gfloat
*packet,
*pixels;
Image
*rescale_image;
int
x_offset,
y_offset;
LqrCarver
*carver;
LqrRetVal
lqr_status;
MagickBooleanType
status;
MemoryInfo
*pixel_info;
register gfloat
*q;
ssize_t
y;
/*
Liquid rescale image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
if ((columns <= 2) || (rows <= 2))
return(ResizeImage(image,columns,rows,image->filter,exception));
pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
return((Image *) NULL);
pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
status=MagickTrue;
q=pixels;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
*q++=QuantumScale*p[i];
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
(int) GetPixelChannels(image),LQR_COLDEPTH_32F);
if (carver == (LqrCarver *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
lqr_carver_set_preserve_input_image(carver);
lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
(void) lqr_status;
rescale_image=CloneImage(image,lqr_carver_get_width(carver),
lqr_carver_get_height(carver),MagickTrue,exception);
if (rescale_image == (Image *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
return((Image *) NULL);
}
if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
rescale_image=DestroyImage(rescale_image);
return((Image *) NULL);
}
rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
(void) lqr_carver_scan_reset(carver);
while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
{
register Quantum
*magick_restrict p;
register ssize_t
i;
p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
exception);
if (p == (Quantum *) NULL)
break;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
rescale_traits,
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
rescale_traits=GetPixelChannelTraits(rescale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(rescale_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
packet[i]),p);
}
if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
break;
}
rescale_view=DestroyCacheView(rescale_view);
pixel_info=RelinquishVirtualMemory(pixel_info);
lqr_carver_destroy(carver);
return(rescale_image);
}
#else
MagickExport Image *LiquidRescaleImage(const Image *image,
const size_t magick_unused(columns),const size_t magick_unused(rows),
const double magick_unused(delta_x),const double magick_unused(rigidity),
ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
(void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
"DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
return((Image *) NULL);
}
#endif
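/*
  Usage (a minimal sketch; requires ImageMagick built with the liblqr
  delegate, otherwise the stub above raises MissingDelegateError): carve
  the image to half its width with straight seams:

    Image
      *carved;

    carved=LiquidRescaleImage(image,image->columns/2,image->rows,0.0,0.0,
      exception);
*/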
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagnifyImage() doubles the size of the image with a pixel art scaling
% algorithm.
%
% The format of the MagnifyImage method is:
%
% Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag "Magnify/Image"
CacheView
*image_view,
*magnify_view;
Image
*magnify_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize magnified image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
exception);
if (magnify_image == (Image *) NULL)
return((Image *) NULL);
/*
Magnify image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_number_threads(image,magnify_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
/*
Magnify this row of pixels.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity[9];
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict r;
register ssize_t
i;
size_t
channels;
p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
channels=GetPixelChannels(image);
for (i=0; i < 9; i++)
intensity[i]=GetPixelIntensity(image,p+i*channels);
r=q;
if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
(fabs(intensity[3]-intensity[5]) < MagickEpsilon))
{
/*
Clone center pixel.
*/
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
}
else
{
/*
Selectively clone pixel.
*/
if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[3*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[5*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[3*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
r+=GetPixelChannels(magnify_image);
if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[5*channels+i];
else
for (i=0; i < (ssize_t) channels; i++)
r[i]=p[4*channels+i];
}
q+=2*GetPixelChannels(magnify_image);
}
if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MagnifyImage)
#endif
proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
magnify_view=DestroyCacheView(magnify_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
magnify_image=DestroyImage(magnify_image);
return(magnify_image);
}
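/*
  Usage (a minimal sketch): MagnifyImage() always doubles both dimensions,
  cloning edge-directed neighbors (an EPX/Scale2x-style rule) rather than
  blending, so pixel-art edges stay crisp:

    Image
      *doubled;

    doubled=MagnifyImage(image,exception);
*/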
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M i n i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MinifyImage() is a convenience method that scales an image proportionally to
% half its size.
%
% The format of the MinifyImage method is:
%
% Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
Image
*minify_image;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
exception);
return(minify_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResampleImage() resizes the image in terms of its pixel size, so that when
% displayed at the given resolution it will be the same size, in real-world
% units, as the original image at its original resolution.
%
% The format of the ResampleImage method is:
%
% Image *ResampleImage(Image *image,const double x_resolution,
% const double y_resolution,const FilterType filter,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image to be resized to fit the given resolution.
%
% o x_resolution: the new image x resolution.
%
% o y_resolution: the new image y resolution.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag "Resample/Image"
Image
*resample_image;
size_t
height,
width;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
72.0 : image->resolution.x)+0.5);
height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
72.0 : image->resolution.y)+0.5);
resample_image=ResizeImage(image,width,height,filter,exception);
if (resample_image != (Image *) NULL)
{
resample_image->resolution.x=x_resolution;
resample_image->resolution.y=y_resolution;
}
return(resample_image);
}
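/*
  Worked example (a sketch): a 600x400 image at the default 72 DPI
  resampled to 300 DPI yields

    width  = (size_t) (300*600/72+0.5) = 2500 columns
    height = (size_t) (300*400/72+0.5) = 1667 rows

  so the printed size in inches is unchanged.
*/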
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResizeImage() scales an image to the desired dimensions, using the given
% filter (see AcquireFilterInfo()).
%
% If an undefined filter is given, the filter defaults to Mitchell for a
% colormapped image, an image with a matte channel, or if the image is
% enlarged.  Otherwise the filter defaults to Lanczos.
%
% ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
% The format of the ResizeImage method is:
%
% Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
% const FilterType filter,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o filter: Image filter to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _ContributionInfo
{
double
weight;
ssize_t
pixel;
} ContributionInfo;
static ContributionInfo **DestroyContributionThreadSet(
ContributionInfo **contribution)
{
register ssize_t
i;
assert(contribution != (ContributionInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (contribution[i] != (ContributionInfo *) NULL)
contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
contribution[i]);
contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
return(contribution);
}
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
register ssize_t
i;
ContributionInfo
**contribution;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
sizeof(*contribution));
if (contribution == (ContributionInfo **) NULL)
return((ContributionInfo **) NULL);
(void) ResetMagickMemory(contribution,0,number_threads*sizeof(*contribution));
for (i=0; i < (ssize_t) number_threads; i++)
{
contribution[i]=(ContributionInfo *) MagickAssumeAligned(
AcquireAlignedMemory(count,sizeof(**contribution)));
if (contribution[i] == (ContributionInfo *) NULL)
return(DestroyContributionThreadSet(contribution));
}
return(contribution);
}
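/*
  Each output column (or row) below is a normalized 1-D convolution over a
  window of source pixels (a sketch):

    resize(x) = (1/density) * sum_j weight[j]*source(pixel[j])

  where pixel[j] spans [bisect-support, bisect+support), weight[j] is
  GetResizeFilterWeight() sampled at the scaled distance from bisect, and
  density = sum_j weight[j].  The ContributionInfo arrays above hold one
  such window per thread.
*/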
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const double x_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag "Resize/Image"
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**magick_restrict contributions;
MagickBooleanType
status;
double
scale,
support;
ssize_t
x;
/*
Apply filter to resize horizontally from image to resize image.
*/
scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
return(MagickFalse);
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point sampling.
*/
support=(double) 0.5;
scale=1.0;
}
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
const int
id = GetOpenMPThreadId();
double
bisect,
density;
register const Quantum
*magick_restrict p;
register ContributionInfo
*magick_restrict contribution;
register Quantum
*magick_restrict q;
register ssize_t
y;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((double) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if (n == 0)
continue;
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
(contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
resize_traits,
traits;
register ssize_t
j;
ssize_t
k;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
resize_traits=GetPixelChannelTraits(resize_image,channel);
if ((traits == UndefinedPixelTrait) ||
(resize_traits == UndefinedPixelTrait))
continue;
if (((resize_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
{
j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
stop-1.0)+0.5);
k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[j-start].pixel-contribution[0].pixel);
SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
q);
continue;
}
pixel=0.0;
if ((resize_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (j=0; j < n; j++)
{
k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[j].pixel-contribution[0].pixel);
alpha=contribution[j].weight;
pixel+=alpha*p[k*GetPixelChannels(image)+i];
}
SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
continue;
}
/*
Alpha blending.
*/
gamma=0.0;
for (j=0; j < n; j++)
{
k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
(contribution[j].pixel-contribution[0].pixel);
alpha=contribution[j].weight*QuantumScale*
GetPixelAlpha(image,p+k*GetPixelChannels(image));
pixel+=alpha*p[k*GetPixelChannels(image)+i];
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(resize_image);
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_HorizontalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
const Image *image,Image *resize_image,const double y_factor,
const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
CacheView
*image_view,
*resize_view;
ClassType
storage_class;
ContributionInfo
**magick_restrict contributions;
double
scale,
support;
MagickBooleanType
status;
ssize_t
y;
/*
Apply filter to resize vertically from image to resize image.
*/
scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
support=scale*GetResizeFilterSupport(resize_filter);
storage_class=support > 0.5 ? DirectClass : image->storage_class;
if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
return(MagickFalse);
if (support < 0.5)
{
/*
Support too small even for nearest neighbour: Reduce to point sampling.
*/
support=(double) 0.5;
scale=1.0;
}
contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
if (contributions == (ContributionInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
status=MagickTrue;
scale=PerceptibleReciprocal(scale);
image_view=AcquireVirtualCacheView(image,exception);
resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
for (y=0; y < (ssize_t) resize_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
double
bisect,
density;
register const Quantum
*magick_restrict p;
register ContributionInfo
*magick_restrict contribution;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
n,
start,
stop;
if (status == MagickFalse)
continue;
bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
density=0.0;
contribution=contributions[id];
for (n=0; n < (stop-start); n++)
{
contribution[n].pixel=start+n;
contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
((double) (start+n)-bisect+0.5));
density+=contribution[n].weight;
}
if (n == 0)
continue;
if ((density != 0.0) && (density != 1.0))
{
register ssize_t
i;
/*
Normalize.
*/
density=PerceptibleReciprocal(density);
for (i=0; i < n; i++)
contribution[i].weight*=density;
}
p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
exception);
q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) resize_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
resize_traits,
traits;
register ssize_t
j;
ssize_t
k;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
resize_traits=GetPixelChannelTraits(resize_image,channel);
if ((traits == UndefinedPixelTrait) ||
(resize_traits == UndefinedPixelTrait))
continue;
if (((resize_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
{
j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
stop-1.0)+0.5);
k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
image->columns+x);
SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
q);
continue;
}
pixel=0.0;
if ((resize_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (j=0; j < n; j++)
{
k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[j].weight;
pixel+=alpha*p[k*GetPixelChannels(image)+i];
}
SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
continue;
}
gamma=0.0;
for (j=0; j < n; j++)
{
k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
image->columns+x);
alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
GetPixelChannels(image));
pixel+=alpha*p[k*GetPixelChannels(image)+i];
gamma+=alpha;
}
gamma=PerceptibleReciprocal(gamma);
SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
}
q+=GetPixelChannels(resize_image);
}
if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_VerticalFilter)
#endif
proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
resize_view=DestroyCacheView(resize_view);
image_view=DestroyCacheView(image_view);
contributions=DestroyContributionThreadSet(contributions);
return(status);
}
MagickExport Image *ResizeImage(const Image *image,const size_t columns,
const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
double
x_factor,
y_factor;
FilterType
filter_type;
Image
*filter_image,
*resize_image;
MagickOffsetType
offset;
MagickSizeType
span;
MagickStatusType
status;
ResizeFilter
*resize_filter;
/*
Acquire resize image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows) &&
(filter == UndefinedFilter))
return(CloneImage(image,0,0,MagickTrue,exception));
/*
Acquire resize filter.
*/
x_factor=(double) columns/(double) image->columns;
y_factor=(double) rows/(double) image->rows;
filter_type=LanczosFilter;
if (filter != UndefinedFilter)
filter_type=filter;
else
if ((x_factor == 1.0) && (y_factor == 1.0))
filter_type=PointFilter;
else
if ((image->storage_class == PseudoClass) ||
(image->alpha_trait != UndefinedPixelTrait) ||
((x_factor*y_factor) > 1.0))
filter_type=MitchellFilter;
resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
exception);
if (resize_image != (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(resize_image);
}
#endif
resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (resize_image == (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(resize_image);
}
if (x_factor > y_factor)
filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
else
filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
if (filter_image == (Image *) NULL)
{
resize_filter=DestroyResizeFilter(resize_filter);
return(DestroyImage(resize_image));
}
/*
Resize image.
*/
offset=0;
if (x_factor > y_factor)
{
span=(MagickSizeType) (filter_image->columns+rows);
status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
&offset,exception);
status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
span,&offset,exception);
}
else
{
span=(MagickSizeType) (filter_image->rows+columns);
status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
&offset,exception);
status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
span,&offset,exception);
}
/*
Free resources.
*/
filter_image=DestroyImage(filter_image);
resize_filter=DestroyResizeFilter(resize_filter);
if (status == MagickFalse)
{
resize_image=DestroyImage(resize_image);
return((Image *) NULL);
}
resize_image->type=image->type;
return(resize_image);
}
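/*
  Usage (a minimal sketch; assumes MagickCoreGenesis() was called and
  `image' was read with ReadImage()):

    ExceptionInfo
      *exception = AcquireExceptionInfo();

    Image
      *resized;

    resized=ResizeImage(image,image->columns/2,image->rows/2,LanczosFilter,
      exception);
    if (resized == (Image *) NULL)
      CatchException(exception);
*/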
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S a m p l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SampleImage() scales an image to the desired dimensions with pixel
% sampling. Unlike other scaling methods, this method does not introduce
% any additional color into the scaled image.
%
% The format of the SampleImage method is:
%
% Image *SampleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the sampled image.
%
% o rows: the number of rows in the sampled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag "Sample/Image"
CacheView
*image_view,
*sample_view;
Image
*sample_image;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
x1;
ssize_t
*x_offset,
y;
PointInfo
sample_offset;
/*
Initialize sampled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
sample_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
/*
Set the sampling offset, default is in the mid-point of sample regions.
*/
sample_offset.x=sample_offset.y=0.5-MagickEpsilon;
{
const char
*value;
value=GetImageArtifact(image,"sample:offset");
if (value != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
        flags=ParseGeometry(value,&geometry_info);
sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon;
if ((flags & SigmaValue) != 0)
sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon;
}
}
/*
Allocate scan line buffer and column offset buffers.
*/
x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns,
sizeof(*x_offset));
if (x_offset == (ssize_t *) NULL)
{
sample_image=DestroyImage(sample_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
for (x1=0; x1 < (ssize_t) sample_image->columns; x1++)
x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/
sample_image->columns);
/*
Sample each row.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
for (y=0; y < (ssize_t) sample_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
y_offset;
if (status == MagickFalse)
continue;
y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
sample_image->rows);
p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Sample each column.
*/
for (x=0; x < (ssize_t) sample_image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(sample_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
{
PixelChannel
channel;
PixelTrait
image_traits,
traits;
channel=GetPixelChannelChannel(sample_image,i);
traits=GetPixelChannelTraits(sample_image,channel);
image_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(image_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
image)+i],q);
}
q+=GetPixelChannels(sample_image);
}
if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SampleImage)
#endif
proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
sample_view=DestroyCacheView(sample_view);
x_offset=(ssize_t *) RelinquishMagickMemory(x_offset);
sample_image->type=image->type;
if (status == MagickFalse)
sample_image=DestroyImage(sample_image);
return(sample_image);
}
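/*
  Usage (a minimal sketch): sample from the top-left of each region rather
  than the default mid-point by setting the "sample:offset" artifact to a
  percentage geometry (a hypothetical "0x0" offset shown here):

    Image
      *sampled;

    (void) SetImageArtifact(image,"sample:offset","0x0");
    sampled=SampleImage(image,image->columns/4,image->rows/4,exception);
*/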
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleImage() changes the size of an image to the given dimensions.
%
% The format of the ScaleImage method is:
%
% Image *ScaleImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag "Scale/Image"
CacheView
*image_view,
*scale_view;
double
alpha,
pixel[CompositePixelChannel],
*scale_scanline,
*scanline,
*x_vector,
*y_vector;
Image
*scale_image;
MagickBooleanType
next_column,
next_row,
proceed,
status;
PixelTrait
scale_traits;
PointInfo
scale,
span;
register ssize_t
i;
ssize_t
n,
number_rows,
y;
/*
Initialize scaled image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((columns == 0) || (rows == 0))
ThrowImageException(ImageError,"NegativeOrZeroImageSize");
if ((columns == image->columns) && (rows == image->rows))
return(CloneImage(image,0,0,MagickTrue,exception));
scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (scale_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
{
scale_image=DestroyImage(scale_image);
return((Image *) NULL);
}
/*
Allocate memory.
*/
x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
MaxPixelChannels*sizeof(*x_vector));
scanline=x_vector;
if (image->rows != scale_image->rows)
scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
MaxPixelChannels*sizeof(*scanline));
scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
MaxPixelChannels*sizeof(*scale_scanline));
y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
MaxPixelChannels*sizeof(*y_vector));
if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
(x_vector == (double *) NULL) || (y_vector == (double *) NULL))
{
if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
scanline=(double *) RelinquishMagickMemory(scanline);
if (scale_scanline != (double *) NULL)
scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
if (x_vector != (double *) NULL)
x_vector=(double *) RelinquishMagickMemory(x_vector);
if (y_vector != (double *) NULL)
y_vector=(double *) RelinquishMagickMemory(y_vector);
scale_image=DestroyImage(scale_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Scale image.
*/
number_rows=0;
next_row=MagickTrue;
span.y=1.0;
scale.y=(double) scale_image->rows/(double) image->rows;
(void) ResetMagickMemory(y_vector,0,(size_t) MaxPixelChannels*image->columns*
sizeof(*y_vector));
n=0;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
scale_view=AcquireAuthenticCacheView(scale_image,exception);
for (y=0; y < (ssize_t) scale_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
break;
q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
alpha=1.0;
if (scale_image->rows == image->rows)
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
}
else
{
/*
Scale Y direction.
*/
while (scale.y < span.y)
{
if ((next_row != MagickFalse) &&
(number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
}
for (x=0; x < (ssize_t) image->columns; x++)
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
y_vector[x*GetPixelChannels(image)+i]+=scale.y*
x_vector[x*GetPixelChannels(image)+i];
span.y-=scale.y;
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows))
{
/*
Read a new scanline.
*/
p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
alpha=QuantumScale*GetPixelAlpha(image,p);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & BlendPixelTrait) == 0)
{
x_vector[x*GetPixelChannels(image)+i]=(double) p[i];
continue;
}
x_vector[x*GetPixelChannels(image)+i]=alpha*p[i];
}
p+=GetPixelChannels(image);
}
number_rows++;
next_row=MagickFalse;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y*
x_vector[x*GetPixelChannels(image)+i];
scanline[x*GetPixelChannels(image)+i]=pixel[i];
y_vector[x*GetPixelChannels(image)+i]=0.0;
}
}
scale.y-=span.y;
if (scale.y <= 0)
{
scale.y=(double) scale_image->rows/(double) image->rows;
next_row=MagickTrue;
}
span.y=1.0;
}
if (scale_image->columns == image->columns)
{
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(scale_image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=QuantumScale*scanline[x*GetPixelChannels(image)+
GetPixelChannelOffset(image,AlphaPixelChannel)];
alpha=PerceptibleReciprocal(alpha);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scanline[x*GetPixelChannels(image)+i]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[
x*GetPixelChannels(image)+i]),q);
}
q+=GetPixelChannels(scale_image);
}
}
else
{
ssize_t
t;
/*
Scale X direction.
*/
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
next_column=MagickFalse;
span.x=1.0;
t=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
scale.x=(double) scale_image->columns/(double) image->columns;
while (scale.x >= span.x)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
t++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i];
scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
}
scale.x-=span.x;
span.x=1.0;
next_column=MagickTrue;
}
if (scale.x > 0)
{
if (next_column != MagickFalse)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0.0;
next_column=MagickFalse;
t++;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i];
span.x-=scale.x;
}
}
if (span.x > 0)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i];
}
if ((next_column == MagickFalse) &&
(t < (ssize_t) scale_image->columns))
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
scale_scanline[t*GetPixelChannels(image)+i]=pixel[i];
/*
Transfer scanline to scaled image.
*/
for (x=0; x < (ssize_t) scale_image->columns; x++)
{
if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(scale_image);
continue;
}
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+
GetPixelChannelOffset(image,AlphaPixelChannel)];
alpha=PerceptibleReciprocal(alpha);
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
scale_traits=GetPixelChannelTraits(scale_image,channel);
if ((traits == UndefinedPixelTrait) ||
(scale_traits == UndefinedPixelTrait))
continue;
if ((traits & BlendPixelTrait) == 0)
{
SetPixelChannel(scale_image,channel,ClampToQuantum(
scale_scanline[x*GetPixelChannels(image)+i]),q);
continue;
}
SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*
scale_scanline[x*GetPixelChannels(image)+i]),q);
}
q+=GetPixelChannels(scale_image);
}
}
if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse)
{
status=MagickFalse;
break;
}
proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
scale_view=DestroyCacheView(scale_view);
image_view=DestroyCacheView(image_view);
/*
Free allocated memory.
*/
y_vector=(double *) RelinquishMagickMemory(y_vector);
scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
if (scale_image->rows != image->rows)
scanline=(double *) RelinquishMagickMemory(scanline);
x_vector=(double *) RelinquishMagickMemory(x_vector);
scale_image->type=image->type;
if (status == MagickFalse)
scale_image=DestroyImage(scale_image);
return(scale_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T h u m b n a i l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ThumbnailImage() changes the size of an image to the given dimensions and
% removes any associated profiles.  The goal is to produce small, low-cost
% thumbnail images suited for display on the Web.
%
% The format of the ThumbnailImage method is:
%
% Image *ThumbnailImage(const Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the scaled image.
%
% o rows: the number of rows in the scaled image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5
char
*url,
value[MagickPathExtent];
const char
*name;
Image
*thumbnail_image;
double
x_factor,
y_factor;
struct stat
attributes;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
x_factor=(double) columns/(double) image->columns;
y_factor=(double) rows/(double) image->rows;
if ((x_factor*y_factor) > 0.1)
thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
else
if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
else
{
Image
*sample_image;
sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
exception);
if (sample_image == (Image *) NULL)
return((Image *) NULL);
thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
exception);
sample_image=DestroyImage(sample_image);
}
if (thumbnail_image == (Image *) NULL)
return(thumbnail_image);
(void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
thumbnail_image->depth=8;
thumbnail_image->interlace=NoInterlace;
/*
Strip all profiles except color profiles.
*/
ResetImageProfileIterator(thumbnail_image);
for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
{
if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
{
(void) DeleteImageProfile(thumbnail_image,name);
ResetImageProfileIterator(thumbnail_image);
}
name=GetNextImageProfile(thumbnail_image);
}
(void) DeleteImageProperty(thumbnail_image,"comment");
(void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
if (strstr(image->magick_filename,"//") == (char *) NULL)
(void) FormatLocaleString(value,MagickPathExtent,"file://%s",
image->magick_filename);
(void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
(void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
if ( GetPathAttributes(image->filename,&attributes) != MagickFalse )
{
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
attributes.st_mtime);
(void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
}
(void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
value);
(void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
(void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
LocaleLower(value);
(void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
url=GetMagickHomeURL();
(void) SetImageProperty(thumbnail_image,"software",url,exception);
url=DestroyString(url);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->magick_columns);
(void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
image->magick_rows);
(void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
exception);
(void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
GetImageListLength(image));
(void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
exception);
return(thumbnail_image);
}
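/*
  A minimal usage sketch for ThumbnailImage() (not part of MagickCore; the
  helper below and its file names are hypothetical, though it uses only
  documented MagickCore entry points). Kept under #if 0 so this unit still
  compiles unchanged.
*/
#if 0
#include "MagickCore/MagickCore.h"
static int make_thumbnail(const char *input,const char *output,
  ExceptionInfo *exception)
{
  Image *image, *thumbnail;
  ImageInfo *image_info;
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,input,MagickPathExtent);
  image=ReadImage(image_info,exception);              /* decode the source */
  if (image == (Image *) NULL)
    return(-1);
  thumbnail=ThumbnailImage(image,128,128,exception);  /* 128x128, profiles stripped */
  image=DestroyImage(image);
  if (thumbnail == (Image *) NULL)
    return(-1);
  (void) CopyMagickString(thumbnail->filename,output,MagickPathExtent);
  (void) WriteImage(image_info,thumbnail,exception);  /* encode by file extension */
  thumbnail=DestroyImage(thumbnail);
  image_info=DestroyImageInfo(image_info);
  return(0);
}
#endif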
|
linked_omp3_tasks.c | #include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#ifndef N
#define N 5
#endif
#ifndef FS
#define FS 38
#endif
struct node {
int data;
int fibdata;
struct node* next;
};
struct node* init_list(struct node* p);
void processwork(struct node* p);
int fib(int n);
int fib(int n)
{
int x, y;
if (n < 2) {
return (n);
} else {
x = fib(n - 1);
y = fib(n - 2);
return (x + y);
}
}
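/* Note: this naive double recursion runs in exponential time, so with the
   default FS = 38 each list node hands its task a sizeable, measurable
   chunk of work. */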
void processwork(struct node* p)
{
int n, temp;
n = p->data;
temp = fib(n);
p->fibdata = temp;
}
struct node* init_list(struct node* p)
{
int i;
struct node* head = NULL;
struct node* temp = NULL;
head = (struct node*) malloc(sizeof(struct node));
p = head;
p->data = FS;
p->fibdata = 0;
for (i=0; i< N; i++) {
temp = (struct node*) malloc(sizeof(struct node));
p->next = temp;
p = temp;
p->data = FS + i + 1;
p->fibdata = i+1;
}
p->next = NULL;
return head;
}
int main()
{
double start, end;
struct node *p=NULL;
struct node *temp=NULL;
struct node *head=NULL;
printf("Process linked list\n");
printf(" Each linked list node will be processed by function 'processwork()'\n");
printf(" Each ll node will compute %d fibonacci numbers beginning with %d\n",N,FS);
p = init_list(p);
head = p;
start = omp_get_wtime();
#pragma omp parallel
{
#pragma omp master
printf("Threads: %d\n", omp_get_num_threads());
#pragma omp single
{
p=head;
while (p) {
#pragma omp task firstprivate(p) // firstprivate is required: each task must capture its own copy of p, since the single thread advances p before the task runs
{
processwork(p);
}
p = p->next;
}
}
}
end = omp_get_wtime();
p = head;
while (p != NULL) {
printf("%d : %d\n",p->data, p->fibdata);
temp = p->next;
free (p);
p = temp;
}
printf("Compute Time: %f seconds\n", end - start);
return 0;
}
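/*
  Hypothetical build/run line (an assumption, not part of the original
  source); any C compiler with OpenMP support should do:

      gcc -O2 -fopenmp linked_omp3_tasks.c -o linked_tasks
      OMP_NUM_THREADS=4 ./linked_tasks

  The thread that enters the single region walks the list and spawns one
  task per node; the rest of the team executes the tasks, and the implicit
  barrier at the end of the parallel region guarantees every task has
  finished before the timer stops.
*/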
|
equation_groupnorm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#define ALIGNDOWN(N, A) ((N) & ~((A)-1))
#define USE_VECTORIZED_PATH 1
float upconvert_bf16(libxsmm_bfloat16 x) {
union libxsmm_bfloat16_hp bf16_hp;
bf16_hp.i[1] = x;
bf16_hp.i[0] = 0;
return bf16_hp.f;
}
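/* Note: bfloat16 is the upper 16 bits of an IEEE-754 float, so on a
   little-endian machine placing the value in the high half of a zeroed
   32-bit union (as above) upconverts exactly, with no rounding. */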
void tpp_groupnorm_fwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps,
libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel,
libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel) {
LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB); /* [NP, CP, NB, HW, CB] */
LIBXSMM_VLA_DECL(5, float, out, pout, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB);
int np, group_size;
group_size = (CP*CB)/G;
#pragma omp parallel for
for(np = 0; np < NP; np++){
LIBXSMM_ALIGNED(float tmp[2*CB], 64);
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
int i, j, cp, cb, nb, hwb, g;
float m, v;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param m_reduce_rows_params, m_reduce_groups_params, v_reduce_rows_params, v_reduce_groups_params, reduce_HW_params;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_matrix_arg arg_array[5];
for (nb = 0; nb < NB; nb++) { /* [CP, nb, HW, CB] */
all_zero_param.out.primary = sum_X;
all_zero_G_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X2;
all_zero_G_kernel(&all_zero_param);
LIBXSMM_ALIGNED(float new_tmp[2*CB], 64);
for (cp = 0; cp < CP; cp++){ /* [cp, nb, HW, CB] */
#pragma omp simd
for (int cb = 0; cb < 2*CB; cb++) {
tmp[cb] = 0.0f;
}
reduce_HW_params.out.primary = new_tmp; /* [2*CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] -----> [2 * CB] */
reduce_HW_kernel(&reduce_HW_params);
#pragma omp simd
for (cb = 0; cb < 2*CB; cb++) {
tmp[cb] += new_tmp[cb];
}
}
if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
g = (cp*CB)/group_size; /* determine current group */
m_reduce_rows_params.in.primary = tmp;
m_reduce_rows_params.out.primary = &m;
v_reduce_rows_params.in.primary = &tmp[CB];
v_reduce_rows_params.out.primary = &v;
reduce_rows_kernel(&m_reduce_rows_params);
reduce_rows_kernel(&v_reduce_rows_params);
sum_X[g] += m;
sum_X2[g] += v;
}
else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
for(i=0; i < CB; i += group_size){
m_reduce_groups_params.in.primary = &tmp[i];
m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)];
v_reduce_groups_params.in.primary = &tmp[CB + i];
v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)];
reduce_groups_kernel(&m_reduce_groups_params);
reduce_groups_kernel(&v_reduce_groups_params);
}
}
}
for(g = 0; g < G; g++){ /* mean and variance calculation */
mean[np*NB*G + nb*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*NB*G + nb*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*NB*G + nb*G + g]*mean[np*NB*G + nb*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
b[g*group_size + j] = -1 * mean[np*NB*G + nb*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
for (cp = 0; cp < CP; cp++){
arg_array[1].primary = &s[cp*CB]; /* [CB] */
arg_array[2].primary = &b[cp*CB]; /* [CB] */
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, out, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW,CB] */
func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
}
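/*
  The algebra behind s and b (a sketch restating the comments above):
  group normalization computes

      y = gamma * (x - E[X]) / sqrt(var(X) + eps) + beta

  and with s = 1/sqrt(var(X) + eps), b = -E[X]*s this factors into

      y = (s*x + b)*gamma + beta,

  which is exactly the fused multiply-add chain func10 evaluates on each
  [HW/num_HW_blocks, CB] tile, with no per-element division or sqrt.
*/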
void tpp_groupnorm_fwd_bf16(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pinp, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pbeta, float *mean, float *var,
libxsmm_bfloat16 *pout, float eps, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary reduce_rows_kernel,
libxsmm_meltwfunction_unary reduce_groups_kernel, libxsmm_meltwfunction_unary all_zero_G_kernel) {
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, inp, pinp, CP, NB, HW, CB); /* [NP, CP, NB, HW, CB] */
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, out, pout, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, beta, pbeta, CB);
int np, group_size;
group_size = (CP*CB)/G;
#pragma omp parallel for
for(np = 0; np < NP; np++){
LIBXSMM_ALIGNED(float tmp[2*CB], 64);
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
int i, j, nb, cp, cb, g, hwb;
float m, v;
libxsmm_matrix_eqn_param eqn_param;
libxsmm_meltw_unary_param m_reduce_rows_params, m_reduce_groups_params, v_reduce_rows_params, v_reduce_groups_params, reduce_HW_params;
libxsmm_meltw_unary_param all_zero_param;
libxsmm_matrix_arg arg_array[5];
for (nb = 0; nb < NB; nb++) { /* [CP, nb, HW, CB] */
all_zero_param.out.primary = sum_X;
all_zero_G_kernel(&all_zero_param);
all_zero_param.out.primary = sum_X2;
all_zero_G_kernel(&all_zero_param);
LIBXSMM_ALIGNED(float new_tmp[2*CB], 64);
for (cp = 0; cp < CP; cp++){ /* [cp, nb, HW, CB] */
#pragma omp simd
for (cb = 0; cb < 2*CB; cb++) {
tmp[cb] = 0.0f;
}
reduce_HW_params.out.primary = new_tmp; /* [2*CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] -----> [2 * CB] */
reduce_HW_kernel(&reduce_HW_params);
#pragma omp simd
for (cb = 0; cb < 2*CB; cb++) {
tmp[cb] += new_tmp[cb];
}
}
if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
g = (cp*CB)/group_size; /* determine current group */
m_reduce_rows_params.in.primary = tmp;
m_reduce_rows_params.out.primary = &m;
v_reduce_rows_params.in.primary = &tmp[CB];
v_reduce_rows_params.out.primary = &v;
reduce_rows_kernel(&m_reduce_rows_params);
reduce_rows_kernel(&v_reduce_rows_params);
sum_X[g] += m;
sum_X2[g] += v;
}
else{ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
for(i=0; i < CB; i += group_size){
m_reduce_groups_params.in.primary = &tmp[i];
m_reduce_groups_params.out.primary = &sum_X[cp*(CB/group_size) + (i/group_size)];
v_reduce_groups_params.in.primary = &tmp[CB + i];
v_reduce_groups_params.out.primary = &sum_X2[cp*(CB/group_size) + (i/group_size)];
reduce_groups_kernel(&m_reduce_groups_params);
reduce_groups_kernel(&v_reduce_groups_params);
}
}
}
for(g = 0; g < G; g++){ /* mean and variance calculation */
mean[np*NB*G + nb*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*NB*G + nb*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*NB*G + nb*G + g]*mean[np*NB*G + nb*G + g]); /* var = E[X^2] - (E[X])^2 */
for(j = 0; j < group_size; j++){
s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps)); /* 1/sqrt(var(X) + eps) */
b[g*group_size + j] = -1 * mean[np*NB*G + nb*G + g] * s[g*group_size + j]; /* -E[X]/sqrt(var(X) + eps) */
}
}
for (cp = 0; cp < CP; cp++){
arg_array[1].primary = &s[cp*CB]; /* [CB] */
arg_array[2].primary = &b[cp*CB]; /* [CB] */
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW, CB] */
eqn_param.inputs = arg_array;
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, out, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB); /* [HW,CB] */
func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
}
void tpp_groupnorm_bwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta,
libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func, libxsmm_matrix_eqn_function din_func, float eps) {
int group_size;
group_size = (CP*CB)/G;
const float scale = 1.0f / ((float)CP*HW*CB);
LIBXSMM_VLA_DECL(5, float, din, pdin, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(5, float, dout, pdout, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel
{
LIBXSMM_ALIGNED(float a[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_ALIGNED(float c[CP*CB], 64);
LIBXSMM_ALIGNED(float ds[CP*CB], 64);
LIBXSMM_ALIGNED(float db[CP*CB], 64);
int np;
#pragma omp for
for (np = 0; np < NP; np++) {
int j, nb, g, cp, hwb;
for(j = 0; j < CP*CB; j++){
dgamma_NP[np*CP*CB + j] = 0.0f;
dbeta_NP[np*CP*CB + j] = 0.0f;
}
libxsmm_matrix_eqn_param eqn_param;
libxsmm_matrix_arg arg_array[10];
eqn_param.inputs = arg_array;
for (nb = 0; nb < NB; nb++) {
for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
for(j = 0; j < group_size; j++){
a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps));
b[g*group_size + j] = -a[g*group_size + j]*mean[np*NB*G + nb*G + g];
ds[g*group_size + j] = 0.0f;
db[g*group_size + j] = 0.0f;
}
}
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
/* arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); */
/* arg_array[5].primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); */
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
/* arg_array[7].primary = &c[cp*CB]; */
arg_array[8].primary = &ds[cp*CB];
arg_array[9].primary = &db[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
eqn_param.output.primary = &ds[cp*CB];
ds_func(&eqn_param);
eqn_param.output.primary = &db[cp*CB];
db_func(&eqn_param);
/* eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); */
eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
dgamma_func(&eqn_param);
/* eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); */
eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
dbeta_func(&eqn_param);
}
}
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[g*group_size + j]; /* Group ds and db calculation */
gdb += db[g*group_size + j];
}
for(j = 0; j < group_size; j++){
b[g*group_size + j] = (gdb * mean[np*NB*G + nb*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
c[g*group_size + j] = -b[g*group_size + j] * mean[np*NB*G + nb*G + g] - gdb * a[g*group_size + j] * scale;
}
}
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
/* arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, dgamma, cp, 0, CB); */
/* arg_array[5].primary = &LIBXSMM_VLA_ACCESS(2, dbeta, cp, 0, CB); */
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[7].primary = &c[cp*CB];
/* arg_array[8].primary = &ds[cp*CB]; */
/* arg_array[9].primary = &db[cp*CB]; */
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, din, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
din_func(&eqn_param);
}
}
}
}
int cp;
#pragma omp for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
}
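/*
  Backward-pass algebra (a sketch matching the b/c comments above, where
  gds and gdb are the per-group sums of ds and db):

      a   = 1/sqrt(var(X) + eps)
      b   = (gdb*E[X] - gds) * a^3 * scale
      c   = -b*E[X] - gdb*a*scale
      din = dout*a*gamma + inp*b + c

  so, as in the forward pass, every input gradient reduces to fused
  multiply-adds over [HW/num_HW_blocks, CB] tiles (din_func).
*/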
void tpp_groupnorm_bwd_bf16(long NP, long CP, long NB, long HW, long CB, long G, long num_HW_blocks, libxsmm_bfloat16 *pdout, libxsmm_bfloat16 *pinp, float *mean, float *var, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pdin, float *pdgamma, float *pdbeta,
libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function db_func, libxsmm_matrix_eqn_function ds_func, libxsmm_matrix_eqn_function din_func, float eps) {
int group_size;
group_size = (CP*CB)/G;
const float scale = 1.0f / ((float)CP*HW*CB);
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, din, pdin, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, inp, pinp, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(5, libxsmm_bfloat16, dout, pdout, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel
{
LIBXSMM_ALIGNED(float a[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_ALIGNED(float c[CP*CB], 64);
LIBXSMM_ALIGNED(float ds[CP*CB], 64);
LIBXSMM_ALIGNED(float db[CP*CB], 64);
int np;
#pragma omp for
for (np = 0; np < NP; np++) {
int j, nb, g, cp, hwb;
for(j = 0; j < CP*CB; j++){
dgamma_NP[np*CP*CB + j] = 0.0f;
dbeta_NP[np*CP*CB + j] = 0.0f;
}
libxsmm_matrix_eqn_param eqn_param;
libxsmm_matrix_arg arg_array[10];
eqn_param.inputs = arg_array;
for (nb = 0; nb < NB; nb++) {
for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
for(j = 0; j < group_size; j++){
a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps));
b[g*group_size + j] = -a[g*group_size + j]*mean[np*NB*G + nb*G + g];
ds[g*group_size + j] = 0.0f;
db[g*group_size + j] = 0.0f;
}
}
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[4].primary = &dgamma_NP[np*CP*CB + cp*CB];
arg_array[5].primary = &dbeta_NP[np*CP*CB + cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[8].primary = &ds[cp*CB];
arg_array[9].primary = &db[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
eqn_param.output.primary = &ds[cp*CB];
ds_func(&eqn_param);
eqn_param.output.primary = &db[cp*CB];
db_func(&eqn_param);
eqn_param.output.primary = &dgamma_NP[np*CP*CB + cp*CB];
dgamma_func(&eqn_param);
eqn_param.output.primary = &dbeta_NP[np*CP*CB + cp*CB];
dbeta_func(&eqn_param);
}
}
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[g*group_size + j]; /* Group ds and db calculation */
gdb += db[g*group_size + j];
}
for(j = 0; j < group_size; j++){
b[g*group_size + j] = (gdb * mean[np*NB*G + nb*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
c[g*group_size + j] = -b[g*group_size + j] * mean[np*NB*G + nb*G + g] - gdb * a[g*group_size + j] * scale;
}
}
for (cp = 0; cp < CP; cp++) {
arg_array[1].primary = &a[cp*CB];
arg_array[2].primary = &b[cp*CB];
arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
arg_array[7].primary = &c[cp*CB];
for(hwb=0; hwb < num_HW_blocks; hwb++){
arg_array[0].primary = &LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
arg_array[3].primary = &LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(5, din, np, cp, nb, hwb*(HW/num_HW_blocks), 0, CP, NB, HW, CB);
din_func(&eqn_param);
}
}
}
}
int cp;
#pragma omp for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
}
void scaler_groupnorm_fwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps){
LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB); /* [NP, CP, NB, HW, CB] */
LIBXSMM_VLA_DECL(5, float, out, pout, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB);
int np, group_size;
group_size = (CP*CB)/G;
#pragma omp parallel for
for(np = 0; np < NP; np++){
LIBXSMM_ALIGNED(float sum_X[G], 64);
LIBXSMM_ALIGNED(float sum_X2[G], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
int i, j, cp, cb, hw, nb, g;
float m, v, value;
for(nb = 0; nb < NB; nb++){
for(g = 0; g < G; g++){
sum_X[g] = 0.0f;
sum_X2[g] = 0.0f;
}
for(cp = 0; cp < CP; cp++){ /* Size = CP*HW*CB*4 */
m = 0.0f;
v = 0.0f;
if (group_size >= CB){ /* Group size >= block size (Ex.- CP = 4, CB = 16, G = 2, group_size = 32) */
for(cb = 0; cb < CB; cb++){
for(hw = 0; hw < HW; hw++){
value = LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB);
m += value;
v += (value*value);
}
}
g = (cp*CB)/group_size; /* determine current group */
sum_X[g] += m;
sum_X2[g] += v;
}
else{
for(i=0; i < CB; i += group_size){ /* Group size < block size (Ex.- CP = 4, CB = 16, G = 32, group_size = 2) */
for(j = 0; j < group_size; j++){
for(hw = 0; hw < HW; hw++){
value = LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, (i + j), CP, NB, HW, CB);
sum_X[cp*(CB/group_size) + (i/group_size)] += value;
sum_X2[cp*(CB/group_size) + (i/group_size)] += (value*value);
}
}
}
}
}
for(g = 0; g < G; g++){ /* mean and variance calculation */ /* Size = 2*CP*CB*4 */
mean[np*NB*G + nb*G + g] = sum_X[g] / ((float)group_size * HW);
var[np*NB*G + nb*G + g] = (sum_X2[g] / ((float)group_size * HW)) - (mean[np*NB*G + nb*G + g]*mean[np*NB*G + nb*G + g]); /* var = E[X^2] - (E[X])^2 [G] */
for(j = 0; j < group_size; j++){
s[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps)); /* s = 1/sqrt(var(X) + eps) [CP, CB] */
b[g*group_size + j] = -1 * mean[np*NB*G + nb*G + g] * s[g*group_size + j]; /* b = -E[X]/sqrt(var(X) + eps) [CP, CB] */
}
}
for(cp = 0; cp < CP; cp++){ /* Size = 2*CP*HW*CB*4 + 2*CP*CB*4 */
for(cb = 0; cb < CB; cb++){
for(hw = 0; hw < HW; hw++){
value = LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB);
value = ((value * s[cp*CB + cb]) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + LIBXSMM_VLA_ACCESS(2, beta, cp, cb, CB); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
LIBXSMM_VLA_ACCESS(5, out, np, cp, nb, hw, cb, CP, NB, HW, CB) = value;
}
}
}
} /* end loops */
} /*End multithreading loop*/
}
void scaler_groupnorm_bwd_fp32(long NP, long CP, long NB, long HW, long CB, long G, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta, float eps) {
int np, group_size;
group_size = (CP*CB)/G;
const float scale = 1.0f / ((float)CP*HW*CB);
LIBXSMM_VLA_DECL(5, float, din, pdin, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(5, float, inp, pinp, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(5, float, dout, pdout, CP, NB, HW, CB);
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
LIBXSMM_ALIGNED(float dgamma_NP[NP*CP*CB], 64);
LIBXSMM_ALIGNED(float dbeta_NP[NP*CP*CB], 64);
#pragma omp parallel for
for(np = 0; np < NP; np++){
int j, nb, cp, cb, hw, g;
LIBXSMM_ALIGNED(float a[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_ALIGNED(float c[CP*CB], 64);
LIBXSMM_ALIGNED(float ds[CP*CB], 64);
LIBXSMM_ALIGNED(float db[CP*CB], 64);
for(j = 0; j < CP*CB; j++){
dgamma_NP[np*CP*CB + j] = 0.0f;
dbeta_NP[np*CP*CB + j] = 0.0f;
}
for (nb = 0; nb < NB; nb++) {
for(g = 0; g < G; g++){ /* compute a and b for each channel from group means and variance */
for(j = 0; j < group_size; j++){
a[g*group_size + j] = 1.0f / ((float)sqrt(var[np*NB*G + nb*G + g] + eps));
b[g*group_size + j] = -a[g*group_size + j]*mean[np*NB*G + nb*G + g];
ds[g*group_size + j] = 0.0f;
db[g*group_size + j] = 0.0f;
}
}
for (cp = 0; cp < CP; cp++) { /* dgamma += (a * inp + b) * dout , dbeta += dout, ds += dout * gamma * inp, db += dout * gamma */ /* Size = 2*CP*HW*CB*4 */
for (cb = 0; cb < CB; cb++) {
for (hw = 0; hw < HW; hw++){
dgamma_NP[np*CP*CB + cp*CB + cb] += (a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB);
dbeta_NP[np*CP*CB + cp*CB + cb] += LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB);
ds[cp*CB + cb] += LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) * LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB);
db[cp*CB + cb] += LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB);
}
}
}
/* b = (db * mean[nb] - ds) * a * a * a * scale; */
/* c = -b * mean[nb] - db * a * scale; */
for(g = 0; g < G; g++){ /* compute b and c for each channel from group means and variance */
float gds = 0.0f;
float gdb = 0.0f;
for(j = 0; j < group_size; j++){
gds += ds[g*group_size + j]; /* Group ds and db calculation */
gdb += db[g*group_size + j];
}
for(j = 0; j < group_size; j++){
b[g*group_size + j] = (gdb * mean[np*NB*G + nb*G + g] - gds) * a[g*group_size + j] * a[g*group_size + j] * a[g*group_size + j] * scale;
c[g*group_size + j] = -b[g*group_size + j] * mean[np*NB*G + nb*G + g] - gdb * a[g*group_size + j] * scale;
}
}
for (cp = 0; cp < CP; cp++) { /* din = dout * a * gamma + b * inp + c */ /* Size = 3*CP*HW*CB*4 */
for (cb = 0; cb < CB; cb++) {
for (hw = 0; hw < HW; hw++){
LIBXSMM_VLA_ACCESS(5, din, np, cp, nb, hw, cb, CP, NB, HW, CB) = LIBXSMM_VLA_ACCESS(5, dout, np, cp, nb, hw, cb, CP, NB, HW, CB) * a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + b[cp*CB + cb] * LIBXSMM_VLA_ACCESS(5, inp, np, cp, nb, hw, cb, CP, NB, HW, CB) + c[cp*CB + cb];
}
}
}
}
}
int cp;
#pragma omp parallel for
for (cp = 0; cp < CP; cp++) {
for (np=0; np < NP; np++ ) {
int cb;
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += dgamma_NP[np*CP*CB + cp*CB + cb];
LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dbeta_NP[np*CP*CB + cp*CB + cb];
}
}
}
}
int main( int argc, char* argv[] ) {
libxsmm_blasint my_eqn10, my_eqn11, my_eqn12, my_eqn13, my_eqn14, my_eqn15;
libxsmm_matrix_eqn_function func10, func11, func12, func13, func14, func15;
libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
libxsmm_meltw_unary_type unary_type;
libxsmm_meltwfunction_unary reduce_rows_kernel, reduce_HW_kernel, reduce_groups_kernel;
const float eps = FLT_EPSILON;
libxsmm_blasint i, it, ld, tmp_ld, tmp_ld2;
unsigned long long l_start, l_end;
double l_total = 0, l_total2 = 0;
double t_vec = 0, t_tpp = 0;
libxsmm_matdiff_info norms_out;
float *inp, *out, *dinp, *dout, *eqn_dinp, *eqn_dout, *dbeta, *eqn_dbeta, *dgamma, *eqn_dgamma, *eqn_out, *gamma, *beta, *cache_fl, *mean, *var, sum = 0.0;
libxsmm_bfloat16 *bf16_inp, *bf16_out, *bf16_dinp, *bf16_dout, *bf16_eqn_dinp, *bf16_eqn_dout, *bf16_gamma, *bf16_beta, *bf16_eqn_out;
int NP = 28;
int CP = 2;
int NB = 1;
int HW = 784;
int CB = 64;
int G = 1;
long num_HW_blocks = 16;
int datatype_mode = 0;
int iters = 100;
libxsmm_datatype in_dt = LIBXSMM_DATATYPE_F32;
libxsmm_datatype out_dt = LIBXSMM_DATATYPE_F32;
if ( argc > 1 ) NP = atoi(argv[1]);
if ( argc > 2 ) CP = atoi(argv[2]);
if ( argc > 3 ) NB = atoi(argv[3]);
if ( argc > 4 ) HW = atoi(argv[4]);
if ( argc > 5 ) CB = atoi(argv[5]);
if ( argc > 6 ) G = atoi(argv[6]);
if ( argc > 7 ) num_HW_blocks = atoi(argv[7]);
if ( argc > 8 ) datatype_mode = atoi(argv[8]);
if ( argc > 9 ) iters = atoi(argv[9]);
if (datatype_mode == 0) {
in_dt = LIBXSMM_DATATYPE_F32;
out_dt = LIBXSMM_DATATYPE_F32;
} else if (datatype_mode == 1) {
in_dt = LIBXSMM_DATATYPE_BF16;
out_dt = LIBXSMM_DATATYPE_BF16;
} else {
  printf("ERROR: Only FP32 and BF16 precisions are supported. Exiting...\n");
  return -1;
}
inp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
eqn_dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
eqn_dout = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
eqn_dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
eqn_dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
gamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
beta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
mean = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*NB*G, 2097152);
var = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*NB*G, 2097152);
eqn_out = (float*) libxsmm_aligned_malloc( sizeof(float)*NP*CP*NB*HW*CB, 2097152);
cache_fl = (float*) libxsmm_aligned_malloc( sizeof(float)*1024*1024, 2097152);
bf16_inp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
bf16_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
bf16_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
bf16_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
bf16_eqn_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
bf16_eqn_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
bf16_gamma = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152);
bf16_beta = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152);
bf16_eqn_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*NP*CP*NB*HW*CB, 2097152);
libxsmm_init();
libxsmm_matdiff_clear(&norms_out);
/* Initializing arrays */
for ( i = 0; i < NP*CP*NB*HW*CB; ++i ) {
inp[i] = (float)libxsmm_rng_f64();
out[i] = (float)libxsmm_rng_f64();
eqn_out[i] = out[i];
dinp[i] = (float)libxsmm_rng_f64();
dout[i] = (float)libxsmm_rng_f64();
eqn_dinp[i] = dinp[i];
eqn_dout[i] = dout[i];
libxsmm_rne_convert_fp32_bf16( &inp[i], &bf16_inp[i], 1 );
libxsmm_rne_convert_fp32_bf16( &out[i], &bf16_out[i], 1 );
libxsmm_rne_convert_fp32_bf16( &eqn_out[i], &bf16_eqn_out[i], 1 );
libxsmm_rne_convert_fp32_bf16( &dout[i], &bf16_dout[i], 1 );
libxsmm_rne_convert_fp32_bf16( &eqn_dout[i], &bf16_eqn_dout[i], 1 );
libxsmm_rne_convert_fp32_bf16( &dinp[i], &bf16_dinp[i], 1 );
libxsmm_rne_convert_fp32_bf16( &eqn_dinp[i], &bf16_eqn_dinp[i], 1 );
}
for ( i = 0; i < CP*CB; ++i ) {
gamma[i] = (float)libxsmm_rng_f64();
beta[i] = (float)libxsmm_rng_f64();
dbeta[i] = (float)libxsmm_rng_f64();
dgamma[i] = (float)libxsmm_rng_f64();
eqn_dbeta[i] = dbeta[i];
eqn_dgamma[i] = dgamma[i];
libxsmm_rne_convert_fp32_bf16( &gamma[i], &bf16_gamma[i], 1 );
libxsmm_rne_convert_fp32_bf16( &beta[i], &bf16_beta[i], 1 );
}
for (i = 0; i < 1024 * 1024; i++ ) {
cache_fl[i] = (float)libxsmm_rng_f64();
}
libxsmm_blasint ldo = G;
libxsmm_meltwfunction_unary all_zero_G_kernel = libxsmm_dispatch_meltw_unary(G, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
if ( all_zero_G_kernel == NULL) {
fprintf( stderr, "JIT for initialization by unary all zero group copy kernel failed. Bailing...!\n");
exit(-1);
}
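/* XOR of a value with itself is zero, so this unary XOR kernel doubles as
   a fast zero-fill for the G-length sum_X / sum_X2 accumulators. */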
/* TPPs for reducing X and X2 in HW*/
ld = CB;
tmp_ld = CB;
unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD;
jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
reduce_HW_kernel = libxsmm_dispatch_meltw_unary(CB, HW/num_HW_blocks, &ld, &tmp_ld, in_dt, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
/* TPP for reducing groups */
libxsmm_blasint group_size = (CP*CB)/G;
ld = group_size; /* group_size = (CP*CB)/G */
tmp_ld = 1;
unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD;
jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS;
reduce_groups_kernel = libxsmm_dispatch_meltw_unary(group_size, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
ld = CB;
tmp_ld = 1;
unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD;
jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS;
reduce_rows_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ld, &tmp_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
/* TPP for scaling */
ld = CB;
tmp_ld = 1;
tmp_ld2 = 1;
my_eqn10 = libxsmm_matrix_eqn_create(); /* y = (s*x + b)*gamma + beta */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* x = [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 1, 0, LIBXSMM_DATATYPE_F32 ); /* s = [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b = [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 3, 0, in_dt ); /* gamma = [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 4, 0, in_dt ); /* beta = [CB] */
func10 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, out_dt, my_eqn10 ); /* y = [HW, CB] */
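/* The push_back calls above describe the equation tree in prefix order:
   each op is pushed before its operands, and the arguments then fill the
   open leaves left to right. The result is MULADD(MULADD(x, s, b), gamma,
   beta), i.e. y = ((s*x + b)*gamma + beta). */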
/* Check correctness */
if (datatype_mode == 0) {
scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
tpp_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel);
} else if (datatype_mode == 1) {
scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
tpp_groupnorm_fwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel);
for ( i = 0; i < NP*CP*NB*HW*CB; ++i ) {
/* out[i] = upconvert_bf16(bf16_out[i]); */
eqn_out[i] = upconvert_bf16(bf16_eqn_out[i]);
}
}
/* compare */
printf("############################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 FWD Groupnorm - Output #\n");
} else {
printf("# Correctness BF16 FWD Groupnorm - Output #\n");
}
printf("############################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*NB*HW*CB, 1, out, eqn_out, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
if (datatype_mode == 0) {
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i];
}
scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler time FWD = %.5g\n", ((double)(l_total)));
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i] + (float)l_total;
}
tpp_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP groupnorm time FWD = %.5g\n", ((double)(l_total2)));
printf("Speedup FWD is %.5g\n", l_total/l_total2);
} else if (datatype_mode == 1) {
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i];
}
scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_fwd_fp32(NP, CP, NB, HW, CB, G, inp, gamma, beta, mean, var, out, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler FP32 groupnorm time FWD = %.5g\n", ((double)(l_total)));
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i] + (float)l_total;
}
tpp_groupnorm_fwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_fwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, reduce_rows_kernel, reduce_groups_kernel, all_zero_G_kernel);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP BF16 groupnorm time FWD = %.5g\n", ((double)(l_total2)));
printf("Speedup FWD is %.5g\n", l_total/l_total2);
}
t_tpp = l_total2;
t_vec = l_total;
/* Group norm equations */
/* Create matrix equations for the groupnorm backward pass */
ld = CB;
tmp_ld2 = 1;
/* dgamma function */
my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp *a + b) * dout) + dgamma */
libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* dgamma = ((inp *a + b) * dout) + dgamma */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn11, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* ((inp *a + b) * dout) */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn11, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 4, 0, LIBXSMM_DATATYPE_F32 ); /* dgamma [CB] */
func11 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn11 ); /* dgamma [CB] */
/* dbeta function */
my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [CB] = dout [HW, CB] + dbeta [CB] */
libxsmm_matrix_eqn_push_back_binary_op( my_eqn12, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* dbeta_tmp [HW, CB] */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn12, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, 1, 1, 5, 0, LIBXSMM_DATATYPE_F32 ); /* dbeta [CB] */
func12 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn12 ); /* dbeta [CB] */
/* db new equation */
my_eqn13 = libxsmm_matrix_eqn_create(); /* db [CB] = (dout * gamma) [HW, CB] + db [CB]*/
libxsmm_matrix_eqn_push_back_binary_op(my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* db [CB] */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn13, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_binary_op( my_eqn13, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn13, CB, 1, 1, 9, 0, LIBXSMM_DATATYPE_F32 ); /* db [CB] */
func13 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn13 ); /* db [CB] */
/* ds new equation */
my_eqn14 = libxsmm_matrix_eqn_create(); /* ds [CB] = ((dout * gamma) * inp) [HW, CB] + ds [CB] */
libxsmm_matrix_eqn_push_back_binary_op(my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */
libxsmm_matrix_eqn_push_back_unary_op(my_eqn14, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_binary_op( my_eqn14, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_BCAST_COL_IN_1, LIBXSMM_DATATYPE_F32 ); /*(dout * gamma)*/
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn14, CB, 1, 1, 8, 0, LIBXSMM_DATATYPE_F32 ); /* ds [CB] */
func14 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn14 ); /* ds [CB] */
/* din equation */
my_eqn15 = libxsmm_matrix_eqn_create(); /* din = ((gamma * a) * dout) + (inp * b + c) */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_binary_op( my_eqn15, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 6, 0, in_dt ); /* gamma [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
libxsmm_matrix_eqn_push_back_ternary_op( my_eqn15, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */
libxsmm_matrix_eqn_push_back_arg( my_eqn15, CB, 1, 1, 7, 0, LIBXSMM_DATATYPE_F32 ); /* c [CB] */
func15 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, in_dt, my_eqn15 ); /* din [HW, CB] */
if (datatype_mode == 0) {
scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
tpp_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps);
} else if (datatype_mode == 1) {
scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
tpp_groupnorm_bwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps);
for ( i = 0; i < NP*CP*NB*HW*CB; ++i ) {
/* dinp[i] = upconvert_bf16(bf16_dinp[i]); */
eqn_dinp[i] = upconvert_bf16(bf16_eqn_dinp[i]);
}
}
/* compare */
printf("############################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 BWD Groupnorm - Dinput #\n");
} else {
printf("# Correctness BF16 BWD Groupnorm - Dinput #\n");
}
printf("############################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, NP*CP*NB*HW*CB, 1, dinp, eqn_dinp, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
printf("###########################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 BWD Groupnorm - Dbeta #\n");
} else {
printf("# Correctness BF16 BWD Groupnorm - Dbeta #\n");
}
printf("###########################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dbeta, eqn_dbeta, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
printf("############################################\n");
if (datatype_mode == 0) {
printf("# Correctness FP32 BWD Groupnorm - Dgamma #\n");
} else {
printf("# Correctness BF16 BWD Groupnorm - Dgamma #\n");
}
printf("############################################\n");
libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dgamma, eqn_dgamma, 0, 0);
printf("L1 reference : %.25g\n", norms_out.l1_ref);
printf("L1 test : %.25g\n", norms_out.l1_tst);
printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
printf("Check-norm : %.24f\n\n", norms_out.normf_rel);
if (datatype_mode == 0) {
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i];
}
scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler groupnorm time BWD = %.5g\n", ((double)(l_total)));
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i] + (float)l_total;
}
tpp_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func13, func14, func15, eps);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP groupnorm time BWD = %.5g\n", ((double)(l_total2)));
printf("Speedup BWD is %.5g\n", l_total/l_total2);
} else if (datatype_mode == 1) {
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i];
}
scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
scaler_groupnorm_bwd_fp32(NP, CP, NB, HW, CB, G, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
printf("Scaler FP32 groupnorm time BWD = %.5g\n", ((double)(l_total)));
for (i = 0; i < 1024 * 1024; i++ ) {
sum += cache_fl[i] + (float)l_total;
}
tpp_groupnorm_bwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, eps);
l_start = libxsmm_timer_tick();
for (it = 0; it < iters; it++) {
tpp_groupnorm_bwd_bf16(NP, CP, NB, HW, CB, G, num_HW_blocks, bf16_dout, bf16_inp, mean, var, bf16_gamma, bf16_dinp, dgamma, dbeta, func11, func12, func13, func14, func15, eps);
}
l_end = libxsmm_timer_tick();
l_total2 = libxsmm_timer_duration(l_start, l_end);
printf("TPP BF16 groupnorm time BWD = %.5g\n", ((double)(l_total2)));
printf("Speedup BWD is %.5g\n", l_total/l_total2);
}
/* printf("Running sum is %.5f\n", sum); */
t_tpp += l_total2;
t_vec += l_total;
printf("\n\n=================================\n");
printf("Total Speedup via TPP Matrix equation is %.5g\n", t_vec/t_tpp);
printf("=================================\n");
libxsmm_free(inp);
libxsmm_free(out);
libxsmm_free(dinp);
libxsmm_free(dout);
libxsmm_free(eqn_dinp);
libxsmm_free(eqn_dout);
libxsmm_free(bf16_dinp);
libxsmm_free(bf16_dout);
libxsmm_free(bf16_eqn_dinp);
libxsmm_free(bf16_eqn_dout);
libxsmm_free(dgamma);
libxsmm_free(dbeta);
libxsmm_free(eqn_dgamma);
libxsmm_free(eqn_dbeta);
libxsmm_free(mean);
libxsmm_free(var);
libxsmm_free(gamma);
libxsmm_free(beta);
libxsmm_free(eqn_out);
libxsmm_free(bf16_inp);
libxsmm_free(bf16_out);
libxsmm_free(bf16_gamma);
libxsmm_free(bf16_beta);
libxsmm_free(bf16_eqn_out);
libxsmm_free(cache_fl);
return 0;
}
|
blas_server_omp.c | /*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"
#ifndef USE_OPENMP
#include "blas_server.c"
#else
#ifndef OMP_SCHED
#define OMP_SCHED static
#endif
int blas_server_avail = 0;
static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
#ifdef HAVE_C11
static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#else
static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#endif
void goto_set_num_threads(int num_threads) {
int i=0, j=0;
if (num_threads < 1) num_threads = blas_num_threads;
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
if (num_threads > blas_num_threads) {
blas_num_threads = num_threads;
}
blas_cpu_number = num_threads;
omp_set_num_threads(blas_cpu_number);
// adjust the per-thread buffers: allocate any missing ones up to the new thread count, free the ones beyond it
for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
for(j=0; j<blas_cpu_number; j++){
if(blas_thread_buffer[i][j]==NULL){
blas_thread_buffer[i][j]=blas_memory_alloc(2);
}
}
for(; j<MAX_CPU_NUMBER; j++){
if(blas_thread_buffer[i][j]!=NULL){
blas_memory_free(blas_thread_buffer[i][j]);
blas_thread_buffer[i][j]=NULL;
}
}
}
#if defined(ARCH_MIPS64)
//set parameters for different number of threads.
blas_set_parameter();
#endif
}
void openblas_set_num_threads(int num_threads) {
goto_set_num_threads(num_threads);
}
int blas_thread_init(void){
int i=0, j=0;
blas_get_cpu_number();
blas_server_avail = 1;
for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
for(j=0; j<blas_num_threads; j++){
blas_thread_buffer[i][j]=blas_memory_alloc(2);
}
for(; j<MAX_CPU_NUMBER; j++){
blas_thread_buffer[i][j]=NULL;
}
}
return 0;
}
int BLASFUNC(blas_thread_shutdown)(void){
int i=0, j=0;
blas_server_avail = 0;
for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
for(j=0; j<MAX_CPU_NUMBER; j++){
if(blas_thread_buffer[i][j]!=NULL){
blas_memory_free(blas_thread_buffer[i][j]);
blas_thread_buffer[i][j]=NULL;
}
}
}
return 0;
}
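/* legacy_exec dispatches a "legacy" BLAS kernel: the mode bits select real
   vs. complex and the precision, and the generic function pointer is cast to
   the matching GEMM-style signature before the call. */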
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){
if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if ((mode & BLAS_PREC) == BLAS_XDOUBLE){
/* REAL / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if ((mode & BLAS_PREC) == BLAS_DOUBLE){
/* REAL / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else if ((mode & BLAS_PREC) == BLAS_SINGLE){
/* REAL / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
#ifdef BUILD_HALF
} else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){
/* REAL / BFLOAT16 */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16,
bfloat16 *, BLASLONG, bfloat16 *, BLASLONG,
bfloat16 *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((bfloat16 *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else if ((mode & BLAS_PREC) == BLAS_STOBF16){
/* REAL / BLAS_STOBF16 */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
float *, BLASLONG, bfloat16 *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else if ((mode & BLAS_PREC) == BLAS_DTOBF16){
/* REAL / BLAS_DTOBF16 */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
double *, BLASLONG, bfloat16 *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
#endif
} else {
/* REAL / Other types in future */
}
} else {
#ifdef EXPRECISION
if ((mode & BLAS_PREC) == BLAS_XDOUBLE){
/* COMPLEX / Extended Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
xdouble *, BLASLONG, xdouble *, BLASLONG,
xdouble *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((xdouble *)args -> alpha)[0],
((xdouble *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else
#endif
if ((mode & BLAS_PREC) == BLAS_DOUBLE){
/* COMPLEX / Double */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
double *, BLASLONG, double *, BLASLONG,
double *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((double *)args -> alpha)[0],
((double *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else if ((mode & BLAS_PREC) == BLAS_SINGLE){
/* COMPLEX / Single */
void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
float *, BLASLONG, float *, BLASLONG,
float *, BLASLONG, void *) = func;
afunc(args -> m, args -> n, args -> k,
((float *)args -> alpha)[0],
((float *)args -> alpha)[1],
args -> a, args -> lda,
args -> b, args -> ldb,
args -> c, args -> ldc, sb);
} else {
/* COMPLEX / Other types in future */
}
}
}
static void exec_threads(blas_queue_t *queue, int buf_index){
void *buffer, *sa, *sb;
int pos=0, release_flag=0;
buffer = NULL;
sa = queue -> sa;
sb = queue -> sb;
#ifdef CONSISTENT_FPCSR
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {
pos = omp_get_thread_num();
buffer = blas_thread_buffer[buf_index][pos];
// fallback: no buffer was pre-allocated for this thread, so allocate one now and free it afterwards
if(buffer==NULL) {
buffer = blas_memory_alloc(2);
release_flag=1;
}
if (sa == NULL) {
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
queue->sa=sa;
}
if (sb == NULL) {
if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE){
sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
/* Other types in future */
}
} else {
#ifdef EXPRECISION
if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) {
sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
/* Other types in future */
}
}
queue->sb=sb;
}
}
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
} else
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
}
if (release_flag) blas_memory_free(buffer);
}
int exec_blas(BLASLONG num, blas_queue_t *queue){
BLASLONG i, buf_index;
if ((num <= 0) || (queue == NULL)) return 0;
#ifdef CONSISTENT_FPCSR
for (i = 0; i < num; i ++) {
__asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
}
#endif
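/* Claim one of the MAX_PARALLEL_NUMBER shared buffer sets: spin until a free
   slot is found (via compare-and-swap when C11 atomics are available). */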
while(true) {
for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
#ifdef HAVE_C11
_Bool inuse = false;
if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) {
#else
if(blas_buffer_inuse[i] == false) {
blas_buffer_inuse[i] = true;
#endif
buf_index = i;
break;
}
}
if(i != MAX_PARALLEL_NUMBER)
break;
}
#pragma omp parallel for num_threads(num) schedule(OMP_SCHED)
for (i = 0; i < num; i ++) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
queue[i].position = i;
#endif
exec_threads(&queue[i], buf_index);
}
#ifdef HAVE_C11
atomic_store(&blas_buffer_inuse[buf_index], false);
#else
blas_buffer_inuse[buf_index] = false;
#endif
return 0;
}
#endif
|
Pragma.h | //===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===//
//
// The LLVM37 Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PragmaHandler and PragmaTable interfaces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM37_CLANG_LEX_PRAGMA_H
#define LLVM37_CLANG_LEX_PRAGMA_H
#include "clang/Basic/LLVM.h"
#include "llvm37/ADT/StringMap.h"
#include "llvm37/ADT/StringRef.h"
#include <cassert>
namespace clang {
class Preprocessor;
class Token;
class IdentifierInfo;
class PragmaNamespace;
/**
* \brief Describes how the pragma was introduced, e.g., with \#pragma,
* _Pragma, or __pragma.
*/
enum PragmaIntroducerKind {
/**
* \brief The pragma was introduced via \#pragma.
*/
PIK_HashPragma,
/**
* \brief The pragma was introduced via the C99 _Pragma(string-literal).
*/
PIK__Pragma,
/**
* \brief The pragma was introduced via the Microsoft
* __pragma(token-string).
*/
PIK___pragma
};
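/// For illustration only (not part of the original header), the three
/// introducer spellings distinguished above look like:
/// \code
///   #pragma pack(4)       // PIK_HashPragma
///   _Pragma("pack(4)")    // PIK__Pragma (C99 string-literal form)
///   __pragma(pack(4))     // PIK___pragma (Microsoft token form)
/// \endcode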
/// PragmaHandler - Instances of this interface are defined to handle the
/// various pragmas that the language front-end uses. Each handler optionally
/// has a name (e.g. "pack"), and the HandlePragma method is invoked when a
/// pragma with that identifier is found. If a pragma does not match any of
/// the declared handlers, the handler with a null identifier is invoked, if
/// it exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
std::string Name;
public:
explicit PragmaHandler(StringRef name) : Name(name) {}
PragmaHandler() {}
virtual ~PragmaHandler();
StringRef getName() const { return Name; }
virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken) = 0;
/// getIfNamespace - If this is a namespace, return it. This is equivalent to
/// using a dynamic_cast, but doesn't require RTTI.
virtual PragmaNamespace *getIfNamespace() { return nullptr; }
};
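/// A minimal sketch, for illustration only (not part of the original header);
/// the class name "IgnoreExamplePragmaHandler" is hypothetical:
/// \code
///   class IgnoreExamplePragmaHandler : public PragmaHandler {
///   public:
///     IgnoreExamplePragmaHandler() : PragmaHandler("example") {}
///     void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
///                       Token &FirstToken) override {
///       // Matches "#pragma example ..." and deliberately does nothing.
///     }
///   };
/// \endcode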
/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
class EmptyPragmaHandler : public PragmaHandler {
public:
EmptyPragmaHandler();
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken) override;
};
/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined. Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
/// Handlers - This is a map of the handlers in this namespace with their name
/// as key.
///
llvm37::StringMap<PragmaHandler*> Handlers;
public:
explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
~PragmaNamespace() override;
/// FindHandler - Check to see if there is already a handler for the
/// specified name. If not, return the handler for the null name if it
/// exists, otherwise return null. If IgnoreNull is true (the default) then
/// the null handler isn't returned on failure to match.
PragmaHandler *FindHandler(StringRef Name,
bool IgnoreNull = true) const;
/// AddPragma - Add a pragma to this namespace.
///
void AddPragma(PragmaHandler *Handler);
/// RemovePragmaHandler - Remove the given handler from the
/// namespace.
void RemovePragmaHandler(PragmaHandler *Handler);
bool IsEmpty() {
return Handlers.empty();
}
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &FirstToken) override;
PragmaNamespace *getIfNamespace() override { return this; }
};
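/// Illustrative usage (assumed, not from the original header): nesting the
/// hypothetical handler sketched above under a namespace so that
/// "#pragma GCC example" dispatches to it:
/// \code
///   PragmaNamespace *GCC = new PragmaNamespace("GCC");
///   GCC->AddPragma(new IgnoreExamplePragmaHandler());
/// \endcode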
} // end namespace clang
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/LocInfoType.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
// HLSL Change Starts
#include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant
namespace hlsl {
struct UnusualAnnotation;
}
// HLSL Change Ends
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPClause;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
class CXXThisExpr; // HLSL Change
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
if (getLangOpts().ModulesHideInternalLinkage)
return isVisible(Old) || New->isExternallyVisible();
return true;
}
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
// HLSL Change Begin
// The HLSL rewriter doesn't define a default matrix pack,
// so we must preserve the lack of annotations to avoid changing semantics.
bool HasDefaultMatrixPack = false;
// Uses of #pragma pack_matrix change the default pack.
bool DefaultMatrixPackRowMajor = false;
// HLSL Change End.
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
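// Illustrative only (the call site is assumed, not shown in this header):
// for "#pragma data_seg(push, ".mydata")" a handler would record the new
// segment roughly as
//   DataSegStack.Act(PragmaLoc, PSK_Push_Set, /*StackSlotLabel=*/"", Lit);
// where Lit is the StringLiteral for ".mydata".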
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not yet prove whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// \brief The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
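/// An illustrative bracketing pattern (assumed, not from the original
/// header) for a region whose access/deprecation diagnostics are delayed:
/// \code
///   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
///   Sema::ProcessingContextState State = S.DelayedDiagnostics.push(Pool);
///   // ... parse or process; diagnostics collect in Pool ...
///   S.DelayedDiagnostics.popWithoutEmitting(State);
/// \endcode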
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC)
{
S.PushFunctionScope();
S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
}
~SynthesizedFunctionScope() {
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
// std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
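/// Illustrative only: typical source constructs for each context.
/// \code
///   sizeof(f(x));           // Unevaluated: f(x) is never executed
///   case kConst:            // ConstantEvaluated: folded at compile time
///   g(x);                   // PotentiallyEvaluated: code is generated
///   void h(int n = f(0));   // PotentiallyEvaluatedIfUsed: evaluated only
///                           // if the default argument is actually used
/// \endcode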
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema& S)
: S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
~FPContractStateRAII() {
S.FPFeatures.fp_contract = OldFPContractState;
}
private:
Sema& S;
bool OldFPContractState : 1;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
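/// Illustrative only (SomeDiagID stands in for a real diagnostic ID):
/// callers stream arguments into the returned builder, and the diagnostic is
/// emitted when the builder is destroyed at the end of the statement:
/// \code
///   S.Diag(Loc, SomeDiagID) << SomeQualType << SomeRange;
/// \endcode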
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const {
if (FunctionScopes.empty())
return nullptr;
for (int e = FunctionScopes.size()-1; e >= 0; --e) {
if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
continue;
return FunctionScopes[e];
}
return nullptr;
}
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
if (!isUnevaluatedContext())
getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
unsigned deduceWeakPropertyFromType(QualType T) {
if ((getLangOpts().getGC() != LangOptions::NonGC &&
T.isObjCGCWeak()) ||
(getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_Weak))
return ObjCDeclSpec::DQ_PR_weak;
return 0;
}
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
// HLSL Change - FIX - We should move param mods to parameter QualTypes
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI,
ArrayRef<hlsl::ParameterModifier> ParamMods);
// HLSL Change - End
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
bool Suppressed;
TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
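// Example (illustrative sketch, not part of the original interface): a
// minimal concrete diagnoser. 'DiagID' is assumed to be a Sema diagnostic
// that accepts a type argument:
//
//   struct IncompleteTypeDiagnoser : TypeDiagnoser {
//     unsigned DiagID;
//     explicit IncompleteTypeDiagnoser(unsigned DiagID)
//         : TypeDiagnoser(DiagID == 0), DiagID(DiagID) {}
//     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
//       if (!Suppressed)
//         S.Diag(Loc, DiagID) << T;
//     }
//   };
//
// BoundTypeDiagnoser below generalizes this pattern to forward extra
// arguments into the diagnostic.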
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
if (Suppressed)
return;
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
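// Example (illustrative sketch): a typical call site. The diagnostic shown
// is assumed to accept the incomplete type as its argument:
//
//   if (RequireCompleteType(Loc, T, diag::err_typecheck_decl_incomplete_type))
//     return true; // diagnostic already emitted
//
// Any extra arguments are streamed into the diagnostic via getPrintable
// before the type itself.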
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false,
bool HasTrailingDot = false,
ParsedType ObjectType = ParsedType(),
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *)
: Kind(NC_Keyword) {
}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
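// Example (illustrative sketch of the parser side); 'Classification' is a
// hypothetical result of ClassifyName:
//
//   switch (Classification.getKind()) {
//   case Sema::NC_Type: {
//     ParsedType T = Classification.getType();
//     // ... continue parsing a declaration or cast using T ...
//     break;
//   }
//   case Sema::NC_Expression:
//     return Classification.getExpression();
//   case Sema::NC_Error:
//     return ExprError();
//   default:
//     break;
//   }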
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
// HLSL Change Starts
// This enumeration is used to determine whether a variable declaration
// should shadow a prior declaration rather than merging.
enum ShadowMergeState {
ShadowMergeState_Disallowed, // shadowing is not allowed
ShadowMergeState_Possible, // shadowing is possible (but may not occur)
ShadowMergeState_Effective // the declaration should shadow a prior one
};
// HLSL Change Ends
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC, hlsl::ParameterModifier ParamMod); // HLSL Change
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition =
nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose when the parameters or return value of a function or
/// Objective-C method definition are passed by value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation = false);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
bool ShouldSkip;
NamedDecl *Previous;
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool Override,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override
};
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
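// For reference (illustrative declarations):
//   void f(int);  void f(double);  // Ovl_Overload: distinct signatures
//   void f(int);  void f(int);     // Ovl_Match: identical signature
//   int  f;       void f(int);     // Ovl_NonFunction: lookup found a variable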
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
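// Example (illustrative sketch): converting a case label to the switch
// condition's type; 'CaseE' and 'CondType' are assumed from the caller:
//
//   llvm::APSInt Val;
//   ExprResult Converted =
//       CheckConvertedConstantExpression(CaseE, CondType, Val, CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return; // diagnostic already emitted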
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
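// Example (illustrative sketch): a caller derives from ICEConvertDiagnoser
// above, overriding each diagnose*/note* hook with a concrete diagnostic,
// then performs the conversion. 'MyCondConverter', 'Loc', and 'CondE' are
// hypothetical:
//
//   MyCondConverter Converter; // derived from ICEConvertDiagnoser
//   ExprResult R = PerformContextualImplicitConversion(Loc, CondE, Converter);
//   if (R.isInvalid())
//     return StmtError();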
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
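// For reference, the Objective-C literal forms these kinds classify:
//   LK_Array:      @[a, b]         LK_Dictionary: @{k : v}
//   LK_Numeric:    @42             LK_Boxed:      @(expr)
//   LK_String:     @"text"         LK_Block:      ^{ /*...*/ }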
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
// Emit as a series of 'note's all template and non-template candidates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
const SourceRange& OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
BEF_begin,
BEF_end
};
ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
SourceLocation RangeLoc,
VarDecl *Decl,
BeginEndFunction BEF,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
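// For reference, these calls correspond to the standard range-based for
// rewrite (C++11 [stmt.ranged]):
//
//   auto __begin = begin-expr;            // resolved with BEF_begin
//   auto __end   = end-expr;              // resolved with BEF_end
//   for (; __begin != __end; ++__begin) { /* loop body */ }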
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
unsigned Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
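// For reference, illustrative operator declarations for each result:
//   LOLR_Cooked:   double operator""_x(long double);
//   LOLR_Raw:      int operator""_x(const char *);
//   LOLR_Template: template <char...> int operator""_x();
//   LOLR_StringTemplate (extension):
//                  template <typename CharT, CharT...> int operator""_x();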
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
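// Example (illustrative sketch): unqualified lookup of an identifier.
// 'II', 'Loc', and 'S' are assumed from the caller's context:
//
//   LookupResult R(*this, II, Loc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult())
//     NamedDecl *D = R.getFoundDecl();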
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate, and returning a new Expr if there
/// were typos that were all successfully corrected, or ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
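// A sketch of the Filter contract described above (illustrative; the
// predicate is hypothetical): rejecting a rebuilt expression with ExprError
// causes the next combination of corrections to be tried.
//
//   ExprResult Res = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
//         if (!Candidate->getType()->isIntegralOrEnumerationType())
//           return ExprError(); // try another combination of corrections
//         return Candidate;
//       });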
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for the nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
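// A minimal sketch (illustrative; NullabilityLoc is a hypothetical
// SourceLocation). Note the inverted return convention: false means the
// specifier was applied.
//
//   QualType T = Context.getPointerType(Context.IntTy);
//   if (!checkNullabilityTypeSpecifier(T, NullabilityKind::NonNull,
//                                      NullabilityLoc,
//                                      /*isContextSensitive=*/false)) {
//     // T now carries the _Nonnull specifier.
//   }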
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns about properties
/// which must be implemented by this implementation but are not.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
bool *isOverridingProperty,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isAssign,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must not have one user-declared accessor
/// (setter or getter) without the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief Returns instance or factory methods in the global method pool for
/// the given selector. If no such method, or only one method, is found, the
/// function returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
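// A sketch of how the pool is typically consulted when typechecking a
// message sent to a receiver of type "id" (illustrative; Sel and Loc are
// hypothetical):
//
//   if (ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(
//           Sel, SourceRange(Loc), /*receiverIdOrClass=*/true)) {
//     // Typecheck the message arguments against Method's signature; the
//     // lookup itself warns if conflicting signatures were pooled.
//   }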
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S): S(S) {
S.ActOnStartOfCompoundStmt();
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
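// Typical usage (a sketch; the callbacks invoked are the ones declared
// above):
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(SemaRef);
//     // ... act on the statements of the compound body ...
//   } // ActOnFinishOfCompoundStmt() runs here.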
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null
/// statement as its \p Body, and the body is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
StringRef message);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
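// A sketch of the check-only mode described above (illustrative; Var and
// Loc are hypothetical): with BuildAndDiagnose = false, nothing is captured
// and nothing is diagnosed.
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = tryCaptureVariable(
//       Var, Loc, TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   if (!CannotCapture) {
//     // DeclRefType is how a reference to Var would be typed here.
//   }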
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
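// A minimal sketch (illustrative): delay a runtime-behavior warning so it
// is emitted only if the enclosing statement turns out to be reachable.
//
//   DiagRuntimeBehavior(E->getExprLoc(), E,
//                       PDiag(diag::warn_null_arg) << E->getSourceRange());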
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
const SourceRange &ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
// HLSL Change Begins
bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc,
UnaryExprOrTypeTrait ExprKind);
// HLSL Change Ends
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
OffsetOfComponent *CompPtr,
unsigned NumComponents,
SourceLocation RParenLoc);
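// A sketch of how a source-level offsetof decomposes into OffsetOfComponent
// entries (illustrative; locations elided, IndexExpr is a hypothetical
// Expr* for the subscript):
//
//   // __builtin_offsetof(T, a.b[2]) yields three components:
//   OffsetOfComponent Comps[3];
//   Comps[0].isBrackets = false; Comps[0].U.IdentInfo = &Context.Idents.get("a");
//   Comps[1].isBrackets = false; Comps[1].U.IdentInfo = &Context.Idents.get("b");
//   Comps[2].isBrackets = true;  Comps[2].U.E = IndexExpr;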
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
// HLSL Change Starts
//===---------------------------- HLSL Features -------------------------===//
/// cbuffer/tbuffer
llvm::SmallVector<Decl*, 1> HLSLBuffers;
Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc,
IdentifierInfo *Ident, SourceLocation IdentLoc,
std::vector<hlsl::UnusualAnnotation *>& BufferAttributes,
SourceLocation LBrace);
void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
Decl* getActiveHLSLBuffer() const;
void ActOnStartHLSLBufferView();
bool IsOnHLSLBufferView();
Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
DeclGroupPtrTy &dcl, bool iscbuf);
// HLSL Change Ends
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling the destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
//   The exception-specification is noexcept(false) if the set of
//   potential exceptions of the special member function contains "any".
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
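// A sketch of the accumulation pattern this class supports (illustrative;
// RD and Loc are hypothetical, null checks elided), resembling how the
// ComputeDefaulted*ExceptionSpec routines below operate:
//
//   ImplicitExceptionSpecification ExceptSpec(*this);
//   for (CXXBaseSpecifier &Base : RD->bases())
//     if (CXXConstructorDecl *Ctor = LookupDefaultConstructor(
//             Base.getType()->getAsCXXRecordDecl()))
//       ExceptSpec.CalledDecl(Loc, Ctor);
//   FunctionProtoType::ExceptionSpecInfo ESI = ExceptSpec.getExceptionSpec();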
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
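/// For example, a minimal illustrative binary fold:
/// \code
///   template<typename... Ts> bool all(Ts... ts) { return (ts && ... && true); }
/// \endcode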
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
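/// For example, 'this' is explicitly captured in the following lambda
/// ('x_' is an illustrative member):
/// \code
///   auto l = [this] { return x_; };
/// \endcode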
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
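/// For example (illustrative; __is_trivially_copyable is one such builtin
/// trait):
/// \code
///   static_assert(__is_trivially_copyable(int), "");
/// \endcode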
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
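/// For example (illustrative, using the __array_rank and __array_extent
/// traits):
/// \code
///   static_assert(__array_rank(int[3][4]) == 2, "");
///   static_assert(__array_extent(int[3][4], 1) == 4, "");
/// \endcode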
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary expression trait support
/// pseudo-functions.
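/// For example (illustrative, using the __is_lvalue_expr trait):
/// \code
///   int n = 0;
///   bool b = __is_lvalue_expr(n);  // true
/// \endcode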
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, this method is being called to improve
/// error recovery; in that case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' with ':'
/// are allowed. The bool pointed to by this parameter is set to true if the
/// identifier is treated as if it were followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
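/// For example, this is invoked for the 'Outer::template Inner<T>::' in:
/// \code
///   typename Outer::template Inner<T>::type x;
/// \endcode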
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
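/// For example:
/// \code
///   struct X { typedef int T; void f(T); };
///   void X::f(T) { }  // 'T' is found in X's scope while parsing the declarator
/// \endcode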
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
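/// For example:
/// \code
///   struct X { static const int a = 1; static int b; };
///   int X::b = a;  // 'a' is looked up in the scope of X
/// \endcode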
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture, applying any
/// implicit conversions (such as an lvalue-to-rvalue conversion) unless the
/// initializer is being used to initialize a reference.
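/// For example ('make()' is a hypothetical initializer):
/// \code
///   auto f = [x = make()] { return x; };  // 'x' is an init-capture
/// \endcode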
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
bool ByRef, IdentifierInfo *Id, Expr *&Init);
/// \brief Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType, IdentifierInfo *Id, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
Expr **Strings,
unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - Builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. The type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - Builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or a C structure with the 'objc_boxable' attribute.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
ObjCDictionaryElement *Elements,
unsigned NumElements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
unsigned NumBases);
bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
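/// For example:
/// \code
///   struct B { virtual B *clone(); };
///   struct D : B { D *clone() override; };  // OK: D* is covariant with B*
/// \endcode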
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
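/// For example:
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); };  // error: 'f' overrides a 'final' function
/// \endcode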
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
AbstractDiagSelID SelID = AbstractNone);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
bool NextIsLess = false); // HLSL Change - additional special case flag
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool NextIsLess = false); // HLSL Change - additional special case flag
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
Decl **Params, unsigned NumParams,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
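/// For example:
/// \code
///   template<typename T, unsigned N> void f(T (&arr)[N]);
///   int a[3];
///   f(a);  // N is deduced as 3 from the array bound
/// \endcode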
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
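/// For example ('g' is a hypothetical callee):
/// \code
///   template<typename... Ts> void f(Ts... ts) {
///     g(ts);     // error: unexpanded parameter pack 'ts'
///     g(ts...);  // OK: 'ts' is expanded
///   }
/// \endcode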
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
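///
/// A sketch of the construct handled (\c Tuple is a hypothetical template):
/// \code
/// template<typename ...Ts>
/// using Wrapped = Tuple<Ts...>;  // 'Ts...' is the pack-expansion argument
/// \endcode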
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
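///
/// A sketch of the failure case this routine reports (\c Pair is a
/// hypothetical template): if \c As and \c Bs are instantiated with
/// different lengths, the expansion below cannot be formed:
/// \code
/// template<typename ...As, typename ...Bs>
/// void zip(Pair<As, Bs> ...ps);  // requires sizeof...(As) == sizeof...(Bs)
/// \endcode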
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
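///
/// For example, the call below deduces conflicting values for \c T, which
/// is reported as TDK_Inconsistent:
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.5); }  // T deduced as both 'int' and 'double'
/// \endcode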
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
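/// Deduce the type of a variable declared with \c auto from its
/// initializer. For example (a sketch):
/// \code
/// auto X = 42;    // deduction succeeds; Result is 'int'
/// auto *P = &X;   // deduction succeeds; Result is 'int *'
/// \endcode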
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArgs provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
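///
/// A usage sketch (the index value is illustrative):
/// \code
/// {
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, /*Index=*/1);
///   // ... substitutions performed here use the second pack element ...
/// } // the previous index is restored on scope exit
/// \endcode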
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
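///
/// A typical usage sketch:
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return true;  // recursion limit exceeded; error already emitted
/// \endcode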
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
bool Invalid;
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
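///
/// A usage sketch:
/// \code
/// SFINAETrap Trap(*this);
/// // ... perform substitution or deduction ...
/// if (Trap.hasErrorOccurred())
///   return TDK_SubstitutionFailure;  // errors were trapped, not emitted
/// \endcode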
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
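///
/// For example (a sketch):
/// \code
/// template<typename T> void f() {
///   enum E { e };
///   struct Local { E get() const { return e; } };  // member instantiated
///   Local().get();                                 // within f's scope
/// }
/// \endcode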
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
const IdentifierLocPair *IdentList,
unsigned NumElts,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
const IdentifierLocPair *ProtocolId,
unsigned NumProtocols,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
/// in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
ObjCContainerDecl *CD,
ObjCPropertyDecl *redeclaredProperty = nullptr,
ObjCContainerDecl *lexicalDC = nullptr);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
bool *OverridingProperty,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
enum PragmaMSStructKind {
PMSST_OFF, // #pragma ms_struct off
PMSST_ON // #pragma ms_struct on
};
enum PragmaMSCommentKind {
PCK_Unknown,
PCK_Linker, // #pragma comment(linker, ...)
PCK_Lib, // #pragma comment(lib, ...)
PCK_Compiler, // #pragma comment(compiler, ...)
PCK_ExeStr, // #pragma comment(exestr, ...)
PCK_User // #pragma comment(user, ...)
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
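/// For example (a sketch; 'r1' is an arbitrary identifier):
/// \code
/// #pragma pack(push, r1, 8)  // Kind == PPK_Push, Name == r1, Alignment == 8
/// #pragma pack(pop, r1)      // Kind == PPK_Pop,  Name == r1
/// \endcode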
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
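/// For example (a sketch):
/// \code
/// #pragma comment(lib, "opengl32")  // Kind == PCK_Lib, Arg == "opengl32"
/// \endcode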
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
// OpenMP directives and clauses.
private:
void *VarDataSharingAttributesStack;
/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
OpenMPClauseKind CKind);
public:
/// \brief Checks if the specified variable is used in one of the private
/// clauses in OpenMP constructs.
bool IsOpenMPCapturedVar(VarDecl *VD);
/// \brief Check if the specified variable is used in a private clause in
/// OpenMP constructs.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// \brief Called on correct id-expression from the '\#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// \brief Called on well-formed '\#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc,
llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
unsigned Argument, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation KindLoc,
SourceLocation CommaLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *
ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId);
/// \brief Called on well-formed 'linear' clause.
OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
Expr *Step,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
/// \brief An implicit conversion.
CCK_ImplicitConversion,
/// \brief A C-style cast.
CCK_CStyleCast,
/// \brief A functional-style cast.
CCK_FunctionalCast,
/// \brief A cast other than a C-style cast.
CCK_OtherCast
};
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers of different signedness, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
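// Illustrative mapping from C snippets to AssignConvertType values; a
// hedged sketch for readers of this header (the actual classification is
// done by CheckAssignmentConstraints below):
//
//   int *p = 5;                              // IntToPointer (extension)
//   int n = p;                               // PointerToInt (extension)
//   void (*fp)(void) = 0; void *v = fp;      // FunctionVoidPointer
//   unsigned *u = (int *)p;                  // IncompatiblePointerSign
//   char **cp = 0; const char **ccp = cp;    // IncompatibleNestedPointerQualifiers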
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false);
/// \brief If the lhs type is a transparent union, check whether we
/// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking for binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool *NonStandardCompositeType = nullptr) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
NonStandardCompositeType);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
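// A hedged illustration of the outcomes above, assuming the usual
// [dcl.init.ref] rules (see CompareReferenceRelationship below):
//
//   int x = 0; const int cx = 0;
//   int       &r1 = x;   // Ref_Compatible (cv1 == cv2)
//   const int &r2 = x;   // Ref_Compatible_With_Added_Qualification
//   int       &r3 = cx;  // ill-formed: only Ref_Related (would drop const)
//   double    &r4 = x;   // Ref_Incompatible (no direct binding)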
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
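// A hedged sketch of how CUDA target attributes are expected to map onto
// CUDAFunctionTarget (see IdentifyCUDATarget below; CFT_InvalidTarget covers
// conflicting attribute combinations):
//
//   __device__ void f();            // CFT_Device
//   __global__ void k();            // CFT_Global
//   void h();                       // CFT_Host (no target attribute)
//   __host__ __device__ void hd();  // CFT_HostDevice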
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change Ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_Unknown
};
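// Illustrative correspondence between format attribute archetypes and
// FormatStringType (a sketch; GetFormatStringType derives this from the
// FormatAttr spelling):
//
//   __attribute__((format(printf, 1, 2)))    -> FST_Printf
//   __attribute__((format(scanf, 1, 2)))     -> FST_Scanf
//   __attribute__((format(strftime, 1, 0)))  -> FST_Strftime
//   any unrecognized archetype                -> FST_Unknown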
static FormatStringType GetFormatStringType(const FormatAttr *Format);
void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg,
FormatStringType Type, bool inFunctionCall,
VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs);
bool FormatStringHasSArg(const StringLiteral *FExpr);
bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter);
bool DiagnoseHLSLLookup(const LookupResult &R);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
AvailabilityResult getCurContextAvailability() const;
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// \brief To be used for checking whether the number of arguments being
/// passed to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
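// Example (hypothetical calls, not part of this header): for a function
// taking two parameters,
//   TooManyArguments(2, 3)        // true
//   TooManyArguments(2, 2)        // false
//   TooManyArguments(2, 2, true)  // true: just after "f(a, b," in code
//                                 // completion, the pending argument counts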
// HLSL Change Begin - adjust this from T* to T&-like
CXXThisExpr *genereateHLSLThis(SourceLocation Loc, QualType ThisType,
bool isImplicit);
// HLSL Change End - adjust this from T* to T&-like
};
/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
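// Typical RAII usage (a hedged sketch; SemaRef is an illustrative Sema &):
//
//   {
//     EnterExpressionEvaluationContext Unevaluated(SemaRef,
//                                                  Sema::Unevaluated);
//     // ... analyze the unevaluated operand of sizeof/decltype here ...
//   } // the destructor pops the evaluation context again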
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
mv_tools.h | #ifndef __MV_TOOLS_H
#define __MV_TOOLS_H
namespace blue_sky
{
namespace bos_helper
{
namespace helper
{
template <typename T> struct is_int
{
enum { value = 0};
};
template <> struct is_int <int>
{
enum { value = 1};
};
template <> struct is_int <unsigned int>
{
enum { value = 1};
};
template <> struct is_int <long>
{
enum { value = 1};
};
template <> struct is_int <unsigned long>
{
enum { value = 1};
};
}
template <class vector_v1_t, class vector_v2_t> inline typename vector_v1_t::value_type
mv_vector_inner_product (const vector_v1_t &v1, const vector_v2_t &v2, int /* obsolete */ = 0)
{
BOOST_STATIC_ASSERT (helper::is_int <typename vector_v1_t::value_type>::value == 0);
BOOST_STATIC_ASSERT (helper::is_int <typename vector_v2_t::value_type>::value == 0);
typename vector_v1_t::value_type sum = 0;
size_t i = 0;
size_t n = v1.size ();
size_t n2 = n - (n % 4);
BS_ASSERT (v1.size () == v2.size ());
#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
#pragma omp parallel for reduction (+: sum)
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL
// main loop, manually unrolled by four
for (i = 0; i < n2; i+=4)
{
sum += v1[i + 0] * v2[i + 0];
sum += v1[i + 1] * v2[i + 1];
sum += v1[i + 2] * v2[i + 2];
sum += v1[i + 3] * v2[i + 3];
}
// scalar remainder loop
for (; i < n; ++i)
{
sum += v1[i] * v2[i];
}
return sum;
}
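// Usage sketch (an illustration, not part of this header): any containers
// with size () and operator[] over a floating-point value_type will do.
//
//   std::vector <double> a (100, 1.0), b (100, 2.0);
//   double dot = blue_sky::bos_helper::mv_vector_inner_product (a, b); // 200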
#ifdef _MPI
template <class T> inline T
mv_vector_inner_product (const mpi_vector <T> &v1, const mpi_vector <T> &v2, int /* obsolete */ = 0)
{
const typename mpi_vector <T>::vector_t &v1_val = v1.get_local_part ();
const typename mpi_vector <T>::vector_t &v2_val = v2.get_local_part ();
BS_ASSERT (v1.size () == v2.size ());
BS_ASSERT (v1.size ());
BS_ASSERT (v1_val.size () == v2_val.size ());
BS_ASSERT (v1_val.size ());
double local_res, res;
local_res = res = 0.0;
for (int i = 0, n_local = (int)v1_val.size (); i < n_local; i++)
local_res += v1_val[i] * v2_val[i];
MPI_Allreduce (&local_res, &res, 1, mpi_type_t<T>::value, MPI_SUM, MPI_COMM_WORLD);
return res;
}
#endif
} // namespace bos_helper
template <class strategy_t>
struct mv_tools
{
typedef typename strategy_t::item_t fp_type;
typedef typename strategy_t::item_array_t item_array_t;
static inline fp_type
mv_vector_inner_product2 (const fp_type *v1, const fp_type *v2, const int n)
{
int i, istart = 0, iend = n;
fp_type sum = 0; // double
#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
int thread_num, n_threads;
fp_type total_sum = 0; // double
#pragma omp parallel private (i, sum, thread_num, n_threads, istart, iend)
{
sum = 0;
thread_num = omp_get_thread_num ();
n_threads = omp_get_num_threads (); // size of the current team; omp_get_max_threads could overcount and leave tail iterations unassigned
istart = thread_num * n / n_threads;
iend = (thread_num + 1) * n / n_threads;
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL
for (i = istart; i < iend; ++i)
sum += v1[i] * v2[i];
#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
#pragma omp atomic
total_sum += sum;
} //end parallel
sum = total_sum;
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL
return sum;
}
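// Note: the manual partition above is a hand-rolled equivalent of
//   #pragma omp parallel for reduction (+: sum)
// each thread sums its contiguous slice [istart, iend) and the per-thread
// partial sums are combined with a single atomic add.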
// r = a + cf1 * b + cf2 * c
static inline void
mv_lin_comb_1 (const int n, const fp_type cf1, const fp_type cf2, const fp_type *a,
const fp_type *b, const fp_type *c, fp_type *r)
{
int i;
#ifdef OTHER_NON_IMPORTANT_PARALLEL
#pragma omp parallel for
#endif //OTHER_NON_IMPORTANT_PARALLEL
for (i = 0; i < n; ++i)
r[i] = a[i] + cf1 * b[i] + cf2 * c[i];
}
// r = a + cf * b
static inline void
mv_lin_comb_2 (const int n, const fp_type cf, const fp_type *a, const fp_type *b, fp_type *r)
{
int i;
#ifdef OTHER_NON_IMPORTANT_PARALLEL
#pragma omp parallel for
#endif //OTHER_NON_IMPORTANT_PARALLEL
for (i = 0; i < n; ++i)
r[i] = a[i] + cf * b[i];
}
static inline void
mv_set (const int n, const fp_type *a, fp_type *r)
{
int i;
for (i = 0; i < n; ++i)
r[i] = a[i];
}
static inline void
mv_update_solution (const int n, const int k, const int m, fp_type *h, fp_type *x, fp_type *s, fp_type *v)
{
int i, j;
fp_type *cur_h; //double
fp_type d; // double
// Backsolve:
for (i = k; i >= 0; --i)
{
cur_h = h + (m + 1) * i;
d = s[i] / cur_h[i];
for (j = i - 1; j >= 0; --j)
s[j] -= cur_h[j] * d;
s[i] = d;
}
for (j = 0; j <= k; ++j)
{
// x += s[j] * v_j (accumulate the stored basis vectors into the solution)
mv_lin_comb_2 (n, s[j], x, v + n * j, x);
}
}
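// Sketch of what the routine computes (our reading of the code): h holds an
// upper-triangular matrix column-wise with leading dimension m + 1, so the
// first loop back-solves R y = s in place (s becomes y), and the second loop
// accumulates x += y[j] * v_j over the k + 1 stored basis vectors, as in a
// GMRES-style solution update.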
static inline void
mv_vector_print (const fp_type *v, const int n)
{
int i;
for (i = 0; i < n; ++i)
printf ("%d \t-- %lf\n", i, v[i]);
}
static inline void
mv_vector_print_file (const fp_type *v, const int n, const char *name)
{
#ifndef UFA_SOLVER
// TODO: IMPL
#else
FILE *f;
int i;
f = fopen (name, "w");
for (i = 0; i < n; ++i)
fprintf (f, "%d \t-- %30.20lf\n", i, v[i]);
fclose (f);
#endif
}
}; // mv_tools
} // namespace blue_sky
#endif //__MV_TOOLS_H
|
2182.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
{
/* E := A*B */
#pragma omp target teams distribute
for (i = 0; i < _PB_NI; i++)
{
/* k must be privatized: it is not a workshared loop index here and would
   otherwise be shared (and raced on) by the team's threads */
#pragma omp parallel for simd num_threads(8) private(k)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
#pragma omp target teams distribute
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp parallel for simd num_threads(8) private(k)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
#pragma omp target teams distribute
for (i = 0; i < _PB_NI; i++)
{
#pragma omp parallel for simd num_threads(8) private(k)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
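/* Data flow of the kernel above: G = (A*B) * (C*D). The first two nests form
   the intermediate products E and F; the third multiplies them. In each nest
   the outer loop is distributed across teams and the middle loop is split
   among (up to) 8 threads per team, with k kept private. */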
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
target_implicit_partial_map.c | // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu
// RUN: %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu
// RUN: %libomptarget-run-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
//
// END.
#include <omp.h>
#include <stdio.h>
int main() {
int arr[100];
#pragma omp target data map(alloc: arr[50:2]) // partially mapped
{
#pragma omp target // would implicitly map with full size but already present
{
arr[50] = 5;
arr[51] = 6;
} // must treat as present (dec ref count) even though full size not present
} // wouldn't delete if previous ref count dec didn't happen
// CHECK: still present: 0
fprintf(stderr, "still present: %d\n",
omp_target_is_present(&arr[50], omp_get_default_device()));
return 0;
}
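// What the CHECK verifies: once the data region ends, the reference-count
// decrement performed by the inner target region lets the runtime delete the
// partial mapping of arr[50:2], so omp_target_is_present reports 0 for the
// fprintf that runs after the region.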
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
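/* Round-trip sketch (the pattern used by every fallback below): unpack the
 * opaque simde__m128 into the union view, touch a lane-typed member, and
 * repack:
 *   simde__m128_private v_ = simde__m128_to_private(v);
 *   v_.f32[0] += 1.0f;
 *   v = simde__m128_from_private(v_);
 * The simde_memcpy-based conversion keeps this well-defined regardless of
 * which union member was last written. */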
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; /* mirror the FE_TOWARDZERO mapping in SIMDE_MM_SET_ROUNDING_MODE below */
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
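/* Save/restore sketch (a minimal usage example, not from the original
 * header):
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   ...rounding-sensitive code...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */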
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8_NATIVE)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps()); /* ps, not pd: this function returns simde__m128 */
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
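/* Example (sketch): clamp negative lanes to zero. A comparison result is
 * all-zeros or all-ones per lane, so it satisfies the mask contract above:
 *   simde__m128 mask = simde_mm_cmplt_ps(x, simde_mm_set1_ps(0.0f));
 *   x = simde_x_mm_select_ps(x, simde_mm_set1_ps(0.0f), mask);
 */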
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
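/* Why the widening path is needed: with 16-bit lanes, a + b + 1 can overflow
 * (e.g. 65535 + 65535 + 1), so the inputs are first converted to 32-bit
 * lanes; (wa + wb + 1) >> 1 then rounds half up, matching PAVGW. */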
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no ordered-compare builtin, so compare a == a and b == b
(each false only for NaN) and AND the results together. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
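/* simde_x_mm_copysign_ps is a SIMDe-internal helper (the x_ prefix marks
   functions that are not part of the Intel API): each lane of the result
   takes its magnitude from dest and its sign bit from src, i.e. a
   vectorized copysignf(). simde_x_mm_xorsign_ps, just below, instead XORs
   the sign bit of src into dest, negating dest wherever src is negative. */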
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
#   define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
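/* A minimal usage sketch for the two lane accessors above (hypothetical
   values; simde_mm_set_pi16 comes from the MMX portion of SIMDe and stores
   its last argument in lane 0):

     simde__m64 v = simde_mm_set_pi16(4, 3, 2, 1);
     int16_t x = simde_mm_extract_pi16(v, 2);      // x == 3
     v = simde_mm_insert_pi16(v, INT16_C(-5), 0);  // lane 0 becomes -5
*/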
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
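/* A minimal sketch of the workaround the comment above alludes to: load two
   unaligned floats by bouncing them through a suitably aligned simde__m64
   (hypothetical values):

     simde_float32 pair[2] = { 1.0f, 2.0f };
     simde__m64 tmp;
     simde_memcpy(&tmp, pair, sizeof(tmp));  // sidesteps the cast-align warning
     simde__m128 v = simde_mm_loadl_pi(simde_mm_setzero_ps(), &tmp);
     // v = { 1.0f, 2.0f, 0.0f, 0.0f }
*/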
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
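/* Usage sketch: a byte of a is stored only where the corresponding mask byte
   has its high bit set; a zero mask writes nothing (hypothetical values):

     int8_t buf[8] = { 0 };
     simde_mm_maskmove_si64(simde_mm_set1_pi8(7), simde_mm_set1_pi8(INT8_MIN), buf);
     // buf == { 7, 7, 7, 7, 7, 7, 7, 7 }
*/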
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
r = vaddv_u8(mask_result);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
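/* Worked example: simde_mm_movemask_ps packs the four sign bits into the low
   four bits of the result, lane 0 into bit 0. Given a = (-1, 2, -3, 4),
   lanes 0 and 2 are negative, so

     simde_mm_movemask_ps(simde_mm_setr_ps(-1.0f, 2.0f, -3.0f, 4.0f)) == 5

   (binary 0101). */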
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ENTA
#define _MM_HINT_ENTA SIMDE_MM_HINT_ENTA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET2
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
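/* A minimal usage sketch (hypothetical buffer). Note that the portable
   fallback above ignores the hint argument, so the call is advisory only:

     char buf[64];
     simde_mm_prefetch(buf, SIMDE_MM_HINT_T0);
*/
/* simde_x_mm_negate_ps below is a SIMDe-internal helper (not part of the
   Intel API) that negates every lane of a. */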
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
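/* The magic constant turns the float's bit pattern into a rough
reciprocal estimate; the final multiply is one Newton-Raphson step,
r' = r * (2 - r * x), which roughly doubles the number of correct bits. */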
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
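/* Classic bit-hack reciprocal square root: a magic constant seeds the
estimate from the bit pattern, and each x = x * (1.5... - xhalf * x * x)
line applies one Newton-Raphson refinement; SIMDE_ACCURACY_PREFERENCE
selects the seed constant and the number of refinement steps. */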
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
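/* For _mm_shuffle_ps the two low selectors of imm8 pick lanes from a and
 * the two high selectors pick lanes from b; e.g. with
 * imm8 = SIMDE_MM_SHUFFLE(1, 0, 3, 2) the result is
 * { a[2], a[3], b[0], b[1] }. */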
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
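/* ARMv7 NEON has no direct vector sqrt: start from the reciprocal
 * square-root estimate and refine it with Newton-Raphson steps
 * (vrsqrtsq_f32 computes (3 - a*est*est) / 2), then recover
 * sqrt(a) as a * (1/sqrt(a)). */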
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
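/* The simde_mm_ucomi*_ss functions below implement SSE's "unordered"
 * scalar comparisons, which must not raise an invalid-operand exception on
 * quiet NaN inputs.  The scalar fallbacks therefore save the floating-point
 * environment with feholdexcept() before comparing and restore it with
 * fesetenv() afterwards, discarding any exception flag the C comparison
 * may have set; the NEON paths instead build explicit not-NaN masks with
 * vceqq_f32(x, x). */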
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
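/* _mm_stream_pi above and _mm_stream_ps below are non-temporal stores:
 * they hint that the data will not be reused soon, so the CPU may bypass
 * the cache.  The hint is purely a performance matter; the portable
 * fallbacks are ordinary stores, which are functionally equivalent. */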
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
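/* SIMDE_MM_TRANSPOSE4_PS transposes a 4x4 matrix held in four row
 * registers.  The generic form interleaves rows pairwise and then
 * recombines 64-bit halves: unpacklo(row0, row1) gives
 * { r0[0], r1[0], r0[1], r1[1] }, and movelh of that with
 * unpacklo(row2, row3) yields the first transposed row
 * { r0[0], r1[0], r2[0], r3[0] }. */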
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
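/* The constants below mirror the x86 MXCSR control/status register layout:
 * bits 0-5 are the exception status flags, bits 7-12 the corresponding
 * exception masks, and bit 15 the flush-to-zero control.  Each SIMDE_ name
 * falls back to a hard-coded value only when the native <xmmintrin.h>
 * constant is unavailable. */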
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
GB_unop__identity_fp64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_fc32
// op(A') function: GB_unop_tran__identity_fp64_fc32
// C type: double
// A type: GxB_FC32_t
// cast: double cij = (double) crealf (aij)
// unaryop: cij = aij
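// Note: typecasting from complex to real keeps only the real part (via
// crealf); the imaginary part of aij is discarded.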
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) crealf (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) crealf (aij) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_fp64_fc32
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
double z = (double) crealf (aij) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
double z = (double) crealf (aij) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_fp64_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dgemm.c | #include "cblas.h"
#include "dgemm_versions.h"
#include <assert.h>
#include <omp.h>
#include <stdio.h>
// #ifndef MYLIB_NUM_THREADS
// #define MYLIB_NUM_THREADS 10
// #endif
#define BLOCK_SIZE 2
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
const int K, const double alpha, const double *A,
const int lda, const double *B, const int ldb,
const double beta, double *C, const int ldc)
{
assert(Order == CblasColMajor);
assert(TransA == CblasTrans);
assert(TransB == CblasNoTrans);
// multiply C by beta
#pragma omp parallel for collapse(2)
for (int col = 0; col < N; col++) {
for (int row = 0; row < M; row++) {
C[ldc * col + row] = beta * C[ldc * col + row];
}
}
int nbRowBlocks = M/BLOCK_SIZE + (M%BLOCK_SIZE != 0 ? 1 : 0);
int nbColBlocks = N/BLOCK_SIZE + (N%BLOCK_SIZE != 0 ? 1 : 0);
int nbKBlocks = K/BLOCK_SIZE + (K%BLOCK_SIZE != 0 ? 1 : 0);
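// Each count above is the ceiling of the dimension over BLOCK_SIZE, e.g.
// with M = 5 and BLOCK_SIZE = 2 there are 3 row blocks (the last one ragged);
// the min() clamps below keep the ragged edge blocks in bounds.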
#pragma omp parallel for collapse(2)
for (int row = 0; row < nbRowBlocks; row++) {
for (int col = 0; col < nbColBlocks; col++) {
int rowStart = row * BLOCK_SIZE;
int rowEnd = min((row+1) * BLOCK_SIZE, M);
int colStart = col * BLOCK_SIZE;
int colEnd = min((col+1) * BLOCK_SIZE, N);
for (int k = 0; k < nbKBlocks; k++) {
int kStart = k * BLOCK_SIZE;
int kEnd = min((k+1) * BLOCK_SIZE, K);
cblas_dgemmScalarjik(Order, TransA, TransB,
rowEnd-rowStart, colEnd-colStart, kEnd-kStart, alpha,
&A[lda*rowStart + kStart], lda,
&B[ldb*colStart + kStart], ldb,
1, &C[ldc*colStart + rowStart], ldc);
}
}
}
}
|
GB_binop__iseq_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_uint32
// A.*B function (eWiseMult): GB_AemultB__iseq_uint32
// A*D function (colscale): GB_AxD__iseq_uint32
// D*A function (rowscale): GB_DxB__iseq_uint32
// C+=B function (dense accum): GB_Cdense_accumB__iseq_uint32
// C+=b function (dense accum): GB_Cdense_accumb__iseq_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_uint32
// C=scalar+B GB_bind1st__iseq_uint32
// C=scalar+B' GB_bind1st_tran__iseq_uint32
// C=A+scalar GB_bind2nd__iseq_uint32
// C=A'+scalar GB_bind2nd_tran__iseq_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij == bij)
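// Note: ISEQ is the "is equal" operator, which returns its result in the
// operand type (here uint32_t, as 1 or 0) rather than as a GrB_BOOL the
// way GrB_EQ does.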
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT32 || GxB_NO_ISEQ_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__iseq_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__iseq_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__iseq_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__iseq_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__iseq_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bxnor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxnor_int64
// A.*B function (eWiseMult): GB_AemultB__bxnor_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bxnor_int64
// C+=b function (dense accum): GB_Cdense_accumb__bxnor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int64
// C=scalar+B GB_bind1st__bxnor_int64
// C=scalar+B' GB_bind1st_tran__bxnor_int64
// C=A+scalar GB_bind2nd__bxnor_int64
// C=A'+scalar GB_bind2nd_tran__bxnor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ~((aij) ^ (bij))
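// Example: with aij = 0b1100 and bij = 0b1010, aij ^ bij = 0b0110, so the
// XNOR result is ~0b0110: every higher bit set and 0b1001 in the low nibble.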
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ~((x) ^ (y)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bxnor_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__bxnor_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bxnor_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bxnor_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bxnor_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB_bind1st_tran__bxnor_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB_bind2nd_tran__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <PeriodicFMM/FMMWrapper-c.h>
#include <PeriodicFMM/FMMWrapperWall2D-c.h>
int main(int argc, char **argv) {
MPI_Init(&argc, &argv);
int rank = 0, size = 0;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
const int nsrc = 16384;
const int ntrg = 16384;
double *srcCoord = malloc(sizeof(double) * 3 * nsrc);
double *srcValue = malloc(sizeof(double) * 3 * nsrc);
double *trgCoord = malloc(sizeof(double) * 3 * ntrg);
double *trgValue = malloc(sizeof(double) * 3 * ntrg);
// some arbitrary data
#pragma omp parallel for
for (int i = 0; i < nsrc; i++) {
int seed = rank * nsrc + i;
srcCoord[3 * i] = fabs(sin(seed));
srcCoord[3 * i + 1] = fabs(cos(seed));
srcCoord[3 * i + 2] = fabs(sin((double) seed * seed)); // avoid int overflow in seed*seed
srcValue[3 * i] = sin(seed);
srcValue[3 * i + 1] = sin(sin(seed));
srcValue[3 * i + 2] = cos(sin(seed));
}
#pragma omp parallel for
for (int i = 0; i < ntrg; i++) {
int seed = rank * nsrc + i;
trgCoord[3 * i] = fabs(cos(seed));
trgCoord[3 * i + 1] = fabs(sin(seed));
trgCoord[3 * i + 2] = fabs(cos((double) seed * seed)); // avoid int overflow in seed*seed
trgValue[3 * i] = 0;
trgValue[3 * i + 1] = 0;
trgValue[3 * i + 2] = 0;
}
MPI_Barrier(MPI_COMM_WORLD);
// FMM_Wrapper
// Evaluate, clear, and Evaluate again
{
FMM_Wrapper *fmm = create_fmm_wrapper(12, 2000, 0, 7, 0);
FMM_SetBox(fmm, 0, 1, 0, 1, 0, 1);
FMM_UpdateTree(fmm, trgCoord, srcCoord, ntrg, nsrc);
FMM_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
FMM_DataClear(fmm);
FMM_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
delete_fmm_wrapper(fmm);
}
// FMM_WrapperWall2D
// Evaluate, clear, and Evaluate again
{
#pragma omp parallel for
for (int i = 0; i < nsrc; i++) {
srcCoord[3 * i + 2] *= 0.499;
}
#pragma omp parallel for
for (int i = 0; i < ntrg; i++) {
trgCoord[3 * i + 2] *= 0.499;
}
FMM_WrapperWall2D *fmm = create_fmm_wrapperwall2d(12, 2000, 0, 4);
FMMWall2D_SetBox(fmm, 0, 1, 0, 1, 0, 0.4999);
FMMWall2D_UpdateTree(fmm, trgCoord, srcCoord, ntrg, nsrc);
FMMWall2D_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
FMMWall2D_DataClear(fmm);
FMMWall2D_Evaluate(fmm, trgValue, srcValue, ntrg, nsrc);
delete_fmm_wrapperwall2d(fmm);
}
free(srcCoord);
free(trgCoord);
free(srcValue);
free(trgValue);
MPI_Finalize();
return 0;
}
|
GB_unaryop__lnot_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_int16
// op(A') function: GB_tran__lnot_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
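// Example: aij = -3 gives cij = 0 and aij = 0 gives cij = 1; any nonzero
// input maps to 0 and zero maps to 1, then the result is cast to uint64_t.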
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint64_int16
(
uint64_t *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint64_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__fmod_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B GB (_bind1st__fmod_fp32)
// C=scalar+B' GB (_bind1st_tran__fmod_fp32)
// C=A+scalar GB (_bind2nd__fmod_fp32)
// C=A'+scalar GB (_bind2nd_tran__fmod_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fmodf (aij, bij)
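// fmodf returns the remainder x - trunc(x/y)*y with the sign of x, e.g.
// fmodf (5.5f, 2.0f) = 1.5f and fmodf (-5.5f, 2.0f) = -1.5f.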
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmodf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
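// Illustrative note: the flip is needed because fmodf is not commutative
// (fmodf (1,2) == 1 but fmodf (2,1) == 0); see the flipxy handling in
// GB (_AemultB_02__fmod_fp32) below.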
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__fmod_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__fmod_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__fmod_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__fmod_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = fmodf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
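// Illustrative example (hypothetical values): with x = 10.0f and
// Bx = { 3.0f, 4.0f, 6.0f }, bind1st computes Cx [p] = fmodf (10, Bx [p]),
// giving Cx = { 1.0f, 2.0f, 4.0f }.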
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__fmod_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = fmodf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmodf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__fmod_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmodf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_select_phase2.c | //------------------------------------------------------------------------------
// GB_select_phase2: C=select(A,thunk)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
// if A is iso and the op is user-defined, Ax [0] is passed to the user
// selectop
const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
size_t asize = A->type->size ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
// if A is bitmap, the bitmap selector is always used instead
ASSERT (!GB_IS_BITMAP (A)) ;
#ifndef GB_DIAG_SELECTOR
// if A is full, all opcodes except DIAG use the bitmap selector instead
ASSERT (!GB_IS_FULL (A)) ;
#endif
const int64_t *restrict kfirst_Aslice = A_ek_slicing ;
const int64_t *restrict klast_Aslice = A_ek_slicing + A_ntasks ;
const int64_t *restrict pstart_Aslice = A_ek_slicing + A_ntasks * 2 ;
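// Illustrative note: A_ek_slicing packs the task descriptors back to back,
// so kfirst_Aslice and klast_Aslice each hold A_ntasks entries (the first
// and last vector of A handled by each task), and pstart_Aslice, starting
// at offset 2*A_ntasks, records where each task begins inside its first
// vector.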
//--------------------------------------------------------------------------
// C = select (A)
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
//----------------------------------------------------------------------
// selection from vectors kfirst to klast
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) to be operated on by this task
//------------------------------------------------------------------
int64_t pA_start, pA_end, pC ;
GB_get_pA_and_pC (&pA_start, &pA_end, &pC, tid, k, kfirst, klast,
pstart_Aslice, Cp_kfirst, Cp, avlen, Ap, avlen) ;
//------------------------------------------------------------------
// compact Ai and Ax [pA_start ... pA_end-1] into Ci and Cx
//------------------------------------------------------------------
#if defined ( GB_ENTRY_SELECTOR )
int64_t j = GBH (Ah, k) ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// A is never full; that case is now handled by the
// bitmap selector instead.
ASSERT (Ai != NULL) ;
int64_t i = Ai [pA] ;
GB_TEST_VALUE_OF_ENTRY (keep, pA) ;
if (keep)
{
ASSERT (pC >= Cp [k] && pC < Cp [k+1]) ;
Ci [pC] = i ;
// Cx [pC] = Ax [pA] ;
GB_SELECT_ENTRY (Cx, pC, Ax, pA) ;
pC++ ;
}
}
#elif defined ( GB_TRIL_SELECTOR ) || \
defined ( GB_ROWGT_SELECTOR )
// keep Zp [k] to pA_end-1
int64_t p = GB_IMAX (Zp [k], pA_start) ;
int64_t mynz = pA_end - p ;
if (mynz > 0)
{
// A and C are both sparse or hypersparse
ASSERT (pA_start <= p && p + mynz <= pA_end) ;
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
ASSERT (Ai != NULL) ;
memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
#if !GB_ISO_SELECT
memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
#endif
}
#elif defined ( GB_TRIU_SELECTOR ) || \
defined ( GB_ROWLE_SELECTOR )
// keep pA_start to Zp[k]-1
int64_t p = GB_IMIN (Zp [k], pA_end) ;
int64_t mynz = p - pA_start ;
if (mynz > 0)
{
// A and C are both sparse or hypersparse
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
ASSERT (Ai != NULL) ;
memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
#if !GB_ISO_SELECT
memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
#endif
}
#elif defined ( GB_DIAG_SELECTOR )
// task that owns the diagonal entry does this work
// A can be sparse or full, but not bitmap
int64_t p = Zp [k] ;
if (pA_start <= p && p < pA_end)
{
ASSERT (pC >= Cp [k] && pC + 1 <= Cp [k+1]) ;
Ci [pC] = GBI (Ai, p, avlen) ;
#if !GB_ISO_SELECT
memcpy (Cx +pC*asize, Ax +p*asize, asize) ;
#endif
}
#elif defined ( GB_OFFDIAG_SELECTOR ) || \
defined ( GB_ROWINDEX_SELECTOR )
// keep pA_start to Zp[k]-1
int64_t p = GB_IMIN (Zp [k], pA_end) ;
int64_t mynz = p - pA_start ;
if (mynz > 0)
{
// A and C are both sparse or hypersparse
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
ASSERT (Ai != NULL) ;
memcpy (Ci +pC, Ai +pA_start, mynz*sizeof (int64_t)) ;
#if !GB_ISO_SELECT
memcpy (Cx +pC*asize, Ax +pA_start*asize, mynz*asize) ;
#endif
pC += mynz ;
}
// keep Zp[k]+1 to pA_end-1
p = GB_IMAX (Zp [k]+1, pA_start) ;
mynz = pA_end - p ;
if (mynz > 0)
{
// A and C are both sparse or hypersparse
ASSERT (pA_start <= p && p < pA_end) ;
ASSERT (pC >= Cp [k] && pC + mynz <= Cp [k+1]) ;
ASSERT (Ai != NULL) ;
memcpy (Ci +pC, Ai +p, mynz*sizeof (int64_t)) ;
#if !GB_ISO_SELECT
memcpy (Cx +pC*asize, Ax +p*asize, mynz*asize) ;
#endif
}
#endif
}
}
}
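// Illustrative summary (derived from the branches above): the entry
// selectors test each entry individually via GB_TEST_VALUE_OF_ENTRY, while
// the positional selectors rely on Zp [k], the split point of A(:,k)
// computed in phase 1: TRIL/ROWGT keep Zp [k] .. pA_end-1, TRIU/ROWLE keep
// pA_start .. Zp [k]-1, DIAG keeps only the entry at Zp [k] if present,
// and OFFDIAG/ROWINDEX keep everything except Zp [k].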
|
enforce_detgammabar_constraint.h |
void enforce_detgammabar_constraint(rfm_struct *restrict rfmstruct,
const paramstruct *restrict params, REAL *restrict in_gfs) {
#include "set_Cparameters.h"
#pragma omp parallel for
for(int i2=0; i2<Nxx_plus_2NGHOSTS2; i2++) {
#include "rfm_files/rfm_struct__read2.h"
for(int i1=0; i1<Nxx_plus_2NGHOSTS1; i1++) {
#include "rfm_files/rfm_struct__read1.h"
for(int i0=0; i0<Nxx_plus_2NGHOSTS0; i0++) {
#include "rfm_files/rfm_struct__read0.h"
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
const double hDD00 = in_gfs[IDX4S(HDD00GF, i0,i1,i2)];
const double hDD01 = in_gfs[IDX4S(HDD01GF, i0,i1,i2)];
const double hDD02 = in_gfs[IDX4S(HDD02GF, i0,i1,i2)];
const double hDD11 = in_gfs[IDX4S(HDD11GF, i0,i1,i2)];
const double hDD12 = in_gfs[IDX4S(HDD12GF, i0,i1,i2)];
const double hDD22 = in_gfs[IDX4S(HDD22GF, i0,i1,i2)];
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
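/*
* Illustrative note: each assignment below rescales the conformal metric
* gammabar_ij by lambda = cbrt(fabs(det(gammahat))/det(gammabar)), where
* det(gammahat) = (f0_of_xx0)^4 * (f1_of_xx1)^2 in these coordinates and
* det(gammabar) is the large polynomial in the hDD components. Since
* det(lambda*gammabar) = lambda^3 * det(gammabar), the update enforces
* det(gammabar) = det(gammahat). The diagonal components carry the
* "+ 1" / "- 1" terms because hDD stores the deviation from gammahat.
*/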
in_gfs[IDX4S(HDD00GF, i0, i1, i2)] = cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))))*(hDD00 + 1) - 1;
in_gfs[IDX4S(HDD01GF, i0, i1, i2)] = hDD01*cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))));
in_gfs[IDX4S(HDD02GF, i0, i1, i2)] = hDD02*cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))));
in_gfs[IDX4S(HDD11GF, i0, i1, i2)] = cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))))*(hDD11 + 1) - 1;
in_gfs[IDX4S(HDD12GF, i0, i1, i2)] = hDD12*cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))));
in_gfs[IDX4S(HDD22GF, i0, i1, i2)] = cbrt(fabs(((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))/(2*((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD01*hDD02*hDD12 - ((f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD12)*(hDD12))*(hDD00 + 1) - ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*((hDD02)*(hDD02))*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0))) - ((f0_of_xx0)*(f0_of_xx0))*((hDD01)*(hDD01))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))) + (hDD00 + 1)*(((f0_of_xx0)*(f0_of_xx0))*hDD11 + ((f0_of_xx0)*(f0_of_xx0)))*(((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1))*hDD22 + ((f0_of_xx0)*(f0_of_xx0))*((f1_of_xx1)*(f1_of_xx1)))))*(hDD22 + 1) - 1;
} // END LOOP: for(int i0=0; i0<Nxx_plus_2NGHOSTS0; i0++)
} // END LOOP: for(int i1=0; i1<Nxx_plus_2NGHOSTS1; i1++)
} // END LOOP: for(int i2=0; i2<Nxx_plus_2NGHOSTS2; i2++)
}
|
GB_binop__bxor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint8)
// A*D function (colscale): GB (_AxD__bxor_uint8)
// D*A function (rowscale): GB (_DxB__bxor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint8)
// C=scalar+B GB (_bind1st__bxor_uint8)
// C=scalar+B' GB (_bind1st_tran__bxor_uint8)
// C=A+scalar GB (_bind2nd__bxor_uint8)
// C=A'+scalar GB (_bind2nd_tran__bxor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij) ^ (bij)
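// Illustrative note: bitwise xor, e.g. 0xF0 ^ 0x3C == 0xCC. The operator
// is commutative, so GB_BINOP_FLIP is 0 below and no flipped variant is
// needed.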
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_UINT8 || GxB_NO_BXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
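// Illustrative note: when is_eWiseUnion is true (the GxB_eWiseUnion
// variant), an entry present in only one input is still combined with the
// matching scalar: cij = alpha_scalar ^ bij where only B(i,j) exists, and
// cij = aij ^ beta_scalar where only A(i,j) exists. Plain eWiseAdd copies
// the lone entry through unchanged.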
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) ^ (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) ^ (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
GrB_Info GB (_bind1st_tran__bxor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
GrB_Info GB (_bind2nd_tran__bxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_mgr.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Two-grid system solver
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"
/* Create */
void *
hypre_MGRCreate()
{
hypre_ParMGRData *mgr_data;
mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);
/* block data */
(mgr_data -> block_size) = 1;
(mgr_data -> block_num_coarse_indexes) = NULL;
(mgr_data -> point_marker_array) = NULL;
(mgr_data -> block_cf_marker) = NULL;
/* general data */
(mgr_data -> max_num_coarse_levels) = 10;
(mgr_data -> A_array) = NULL;
(mgr_data -> P_array) = NULL;
(mgr_data -> RT_array) = NULL;
(mgr_data -> RAP) = NULL;
(mgr_data -> CF_marker_array) = NULL;
(mgr_data -> coarse_indices_lvls) = NULL;
(mgr_data -> A_ff_array) = NULL;
(mgr_data -> F_fine_array) = NULL;
(mgr_data -> U_fine_array) = NULL;
(mgr_data -> aff_solver) = NULL;
(mgr_data -> fine_grid_solver_setup) = NULL;
(mgr_data -> fine_grid_solver_solve) = NULL;
(mgr_data -> F_array) = NULL;
(mgr_data -> U_array) = NULL;
(mgr_data -> residual) = NULL;
(mgr_data -> rel_res_norms) = NULL;
(mgr_data -> Vtemp) = NULL;
(mgr_data -> Ztemp) = NULL;
(mgr_data -> Utemp) = NULL;
(mgr_data -> Ftemp) = NULL;
(mgr_data -> num_iterations) = 0;
(mgr_data -> num_interp_sweeps) = 1;
(mgr_data -> num_restrict_sweeps) = 1;
(mgr_data -> trunc_factor) = 0.0;
(mgr_data -> max_row_sum) = 0.9;
(mgr_data -> strong_threshold) = 0.25;
(mgr_data -> S_commpkg_switch) = 1.0;
(mgr_data -> P_max_elmts) = 0;
(mgr_data -> coarse_grid_solver) = NULL;
(mgr_data -> coarse_grid_solver_setup) = NULL;
(mgr_data -> coarse_grid_solver_solve) = NULL;
(mgr_data -> global_smoother) = NULL;
(mgr_data -> use_default_cgrid_solver) = 1;
(mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used
(mgr_data -> omega) = 1.;
(mgr_data -> max_iter) = 20;
(mgr_data -> tol) = 1.0e-7;
(mgr_data -> relax_type) = 0;
(mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
(mgr_data -> interp_type) = NULL;
(mgr_data -> restrict_type) = NULL;
(mgr_data -> num_relax_sweeps) = 1;
(mgr_data -> relax_weight) = 1.0;
(mgr_data -> logging) = 0;
(mgr_data -> print_level) = 0;
(mgr_data -> l1_norms) = NULL;
(mgr_data -> reserved_coarse_size) = 0;
(mgr_data -> reserved_coarse_indexes) = NULL;
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
(mgr_data -> diaginv) = NULL;
(mgr_data -> global_smooth_iters) = 1;
(mgr_data -> global_smooth_type) = 0;
(mgr_data -> set_non_Cpoints_to_F) = 0;
(mgr_data -> idx_array) = NULL;
(mgr_data -> Frelax_method) = NULL;
(mgr_data -> VcycleRelaxVtemp) = NULL;
(mgr_data -> VcycleRelaxZtemp) = NULL;
(mgr_data -> FrelaxVcycleData) = NULL;
(mgr_data -> Frelax_num_functions) = NULL;
(mgr_data -> max_local_lvls) = 10;
(mgr_data -> use_non_galerkin_cg) = NULL;
(mgr_data -> print_coarse_system) = 0;
(mgr_data -> set_c_points_method) = 0;
(mgr_data -> lvl_to_keep_cpoints) = 0;
(mgr_data -> cg_convergence_factor) = 0.0;
return (void *) mgr_data;
}
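#if 0
/* Illustrative usage sketch (hypothetical driver, not part of this file;
* all values are made up): create an MGR object, mark one of two unknowns
* per block as a C-point on a single reduction level, then destroy it. */
void example_mgr_setup(void)
{
HYPRE_Int num_coarse_per_level[1] = {1}; /* one C-point per block */
HYPRE_Int cidx0[1] = {0}; /* unknown 0 stays coarse */
HYPRE_Int *coarse_indexes[1] = {cidx0};
void *mgr = hypre_MGRCreate();
hypre_MGRSetCpointsByBlock(mgr, 2, 1, num_coarse_per_level, coarse_indexes);
hypre_MGRDestroy(mgr);
}
#endif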
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Destroy */
HYPRE_Int
hypre_MGRDestroy( void *data )
{
hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;
HYPRE_Int i;
HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);
/* block info data */
if ((mgr_data -> block_cf_marker))
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
}
}
hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if(mgr_data -> block_num_coarse_indexes)
{
hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* final residual vector */
if((mgr_data -> residual))
{
hypre_ParVectorDestroy( (mgr_data -> residual) );
(mgr_data -> residual) = NULL;
}
if((mgr_data -> rel_res_norms))
{
hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST);
(mgr_data -> rel_res_norms) = NULL;
}
/* temp vectors for solve phase */
if((mgr_data -> Vtemp))
{
hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
(mgr_data -> Vtemp) = NULL;
}
if((mgr_data -> Ztemp))
{
hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
(mgr_data -> Ztemp) = NULL;
}
if((mgr_data -> Utemp))
{
hypre_ParVectorDestroy( (mgr_data -> Utemp) );
(mgr_data -> Utemp) = NULL;
}
if((mgr_data -> Ftemp))
{
hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
(mgr_data -> Ftemp) = NULL;
}
/* coarse grid solver */
if((mgr_data -> use_default_cgrid_solver))
{
if((mgr_data -> coarse_grid_solver))
hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
(mgr_data -> coarse_grid_solver) = NULL;
}
/* l1_norms */
if ((mgr_data -> l1_norms))
{
for (i=0; i < (num_coarse_levels); i++)
{
hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]);
}
hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
}
/* coarse_indices_lvls */
if ((mgr_data -> coarse_indices_lvls))
{
for (i=0; i < (num_coarse_levels); i++)
if ((mgr_data -> coarse_indices_lvls)[i])
hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
}
/* linear system and cf marker array */
if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array)
{
for (i=1; i < num_coarse_levels+1; i++) {
hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
hypre_ParVectorDestroy((mgr_data -> U_array)[i]);
if ((mgr_data -> P_array)[i-1])
hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]);
if ((mgr_data -> RT_array)[i-1])
hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]);
hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST);
}
for (i=1; i < (num_coarse_levels); i++) {
if ((mgr_data -> A_array)[i])
hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
}
}
/* AMG for Frelax */
if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array)
{
for (i=1; i < num_coarse_levels+1; i++)
{
if (mgr_data -> F_fine_array[i])
hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]);
if (mgr_data -> U_fine_array[i])
hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]);
}
for (i=1; i < (num_coarse_levels); i++)
{
if ((mgr_data -> A_ff_array)[i])
hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]);
}
if (mgr_data -> use_default_fsolver)
{
hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]);
}
hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST);
(mgr_data -> F_fine_array) = NULL;
hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST);
(mgr_data -> U_fine_array) = NULL;
hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST);
(mgr_data -> A_ff_array) = NULL;
}
if(mgr_data -> aff_solver)
{
for (i = 1; i < (num_coarse_levels); i++) {
if ((mgr_data -> aff_solver)[i])
hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]);
}
if (mgr_data -> use_default_fsolver)
{
if ((mgr_data -> aff_solver)[0])
hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]);
}
hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST);
(mgr_data -> aff_solver) = NULL;
}
if((mgr_data -> F_array))
{
hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
(mgr_data -> F_array) = NULL;
}
if((mgr_data -> U_array))
{
hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
(mgr_data -> U_array) = NULL;
}
if((mgr_data -> A_array))
{
hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
(mgr_data -> A_array) = NULL;
}
if((mgr_data -> P_array))
{
hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
(mgr_data -> P_array) = NULL;
}
if((mgr_data -> RT_array))
{
hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
(mgr_data -> RT_array) = NULL;
}
if((mgr_data -> CF_marker_array))
{
hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
(mgr_data -> CF_marker_array) = NULL;
}
if((mgr_data -> reserved_Cpoint_local_indexes))
{
hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> reserved_Cpoint_local_indexes) = NULL;
}
if (mgr_data -> restrict_type)
{
hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST);
(mgr_data -> restrict_type) = NULL;
}
if (mgr_data -> interp_type)
{
hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST);
(mgr_data -> interp_type) = NULL;
}
/* Frelax_method */
if (mgr_data -> Frelax_method)
{
hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_method) = NULL;
}
/* Frelax_num_functions */
if (mgr_data -> Frelax_num_functions)
{
hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_num_functions) = NULL;
}
/* data for V-cycle F-relaxation */
if((mgr_data -> VcycleRelaxVtemp))
{
hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) );
(mgr_data -> VcycleRelaxVtemp) = NULL;
}
if((mgr_data -> VcycleRelaxZtemp))
{
hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) );
(mgr_data -> VcycleRelaxZtemp) = NULL;
}
if (mgr_data -> FrelaxVcycleData) {
for (i = 0; i < num_coarse_levels; i++) {
if ((mgr_data -> FrelaxVcycleData)[i]) {
hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
(mgr_data -> FrelaxVcycleData)[i] = NULL;
}
}
hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
(mgr_data -> FrelaxVcycleData) = NULL;
}
/* data for reserved coarse nodes */
if(mgr_data -> reserved_coarse_indexes)
{
hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
(mgr_data -> reserved_coarse_indexes) = NULL;
}
/* index array for setting Cpoints by global block */
if ((mgr_data -> set_c_points_method) == 1)
{
hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
(mgr_data -> idx_array) = NULL;
}
/* array for setting option to use non-Galerkin coarse grid */
if (mgr_data -> use_non_galerkin_cg)
{
hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
(mgr_data -> use_non_galerkin_cg) = NULL;
}
/* coarse level matrix - RAP */
if ((mgr_data -> RAP))
hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
if ((mgr_data -> diaginv))
hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);
if ((mgr_data -> global_smoother))
{
if (mgr_data -> global_smooth_type == 8)
{
HYPRE_EuclidDestroy((mgr_data -> global_smoother));
}
else if (mgr_data -> global_smooth_type == 16)
{
HYPRE_ILUDestroy((mgr_data -> global_smoother));
}
}
/* mgr data */
hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Create data for V-cycle F-relaxation */
void *
hypre_MGRCreateFrelaxVcycleData()
{
hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);
hypre_ParAMGDataAArray(vdata) = NULL;
hypre_ParAMGDataPArray(vdata) = NULL;
hypre_ParAMGDataFArray(vdata) = NULL;
hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
hypre_ParAMGDataVtemp(vdata) = NULL;
hypre_ParAMGDataAMat(vdata) = NULL;
hypre_ParAMGDataBVec(vdata) = NULL;
hypre_ParAMGDataZtemp(vdata) = NULL;
hypre_ParAMGDataCommInfo(vdata) = NULL;
hypre_ParAMGDataUArray(vdata) = NULL;
hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
hypre_ParAMGDataNumLevels(vdata) = 0;
hypre_ParAMGDataMaxLevels(vdata) = 10;
hypre_ParAMGDataNumFunctions(vdata) = 1;
hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
hypre_ParAMGDataRelaxOrder(vdata) = 1;
hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
hypre_ParAMGDataMinCoarseSize(vdata) = 0;
hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;
return (void *) vdata;
}
/* Destroy data for V-cycle F-relaxation */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
HYPRE_Int i;
HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
for (i=1; i < num_levels + 1; i++)
{
if (hypre_ParAMGDataAArray(vdata)[i])
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
if (hypre_ParAMGDataPArray(vdata)[i-1])
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST);
hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
}
/* see comments in par_coarsen.c regarding special case for CF_marker */
if (num_levels <= 1)
{
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST);
}
/* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
//hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
//hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST);
/* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */
/*
if (hypre_ParAMGDataZtemp(vdata))
hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
*/
if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);
if (new_comm != hypre_MPI_COMM_NULL)
{
hypre_MPI_Comm_free (&new_comm);
}
hypre_TFree(vdata, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/* Set C-point variables for each reduction level */
/* Currently not implemented */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void *mgr_vdata,
HYPRE_Int nlevels,
HYPRE_Int *num_coarse_points,
HYPRE_Int **level_coarse_indexes)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_coarse_levels) = nlevels;
(mgr_data -> num_coarse_per_level) = num_coarse_points;
(mgr_data -> level_coarse_indexes) = level_coarse_indexes;
return hypre_error_flag;
}
/* Initialize some data */
/* Set whether non-coarse points on each level should be explicitly tagged as F-points */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
return hypre_error_flag;
}
/* Set whether the reserved C points are reduced before the coarse grid solve */
HYPRE_Int
hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> lvl_to_keep_cpoints) = level;
return hypre_error_flag;
}
/* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... */
HYPRE_Int
hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_BigInt *begin_idx_array,
HYPRE_Int *block_num_coarse_points,
HYPRE_Int **block_coarse_indexes)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
if((mgr_data -> idx_array) != NULL) {
hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
(mgr_data -> idx_array) = NULL;
}
HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST);
if (begin_idx_array != NULL)
{
for (i = 0; i < block_size; i++) {
index_array[i] = *(begin_idx_array+i);
}
}
hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes);
(mgr_data -> idx_array) = index_array;
(mgr_data -> set_c_points_method) = 1;
return hypre_error_flag;
}
/* Initialize/ set local block data information */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_Int *block_num_coarse_points,
HYPRE_Int **block_coarse_indexes)
{
HYPRE_Int i,j;
HYPRE_Int **block_cf_marker = NULL;
HYPRE_Int *block_num_coarse_indexes = NULL;
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
/* free block cf_marker data if not previously destroyed */
if((mgr_data -> block_cf_marker) != NULL)
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker)[i] = NULL;
}
}
hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if((mgr_data -> block_num_coarse_indexes))
{
hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* store block cf_marker */
block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_levels; i++)
{
block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int));
}
for (i = 0; i < max_num_levels; i++)
{
for(j=0; j<block_num_coarse_points[i]; j++)
{
(block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
}
}
/* store block_num_coarse_points */
if(max_num_levels > 0)
{
block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
for(i=0; i<max_num_levels; i++)
block_num_coarse_indexes[i] = block_num_coarse_points[i];
}
/* set block data */
(mgr_data -> max_num_coarse_levels) = max_num_levels;
(mgr_data -> block_size) = block_size;
(mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
(mgr_data -> block_cf_marker) = block_cf_marker;
(mgr_data -> set_c_points_method) = 0;
return hypre_error_flag;
}
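/* Illustrative note: with block_size = 3 and block_coarse_indexes[lvl] =
* {0}, block_cf_marker[lvl] becomes {CMRK, FMRK, FMRK}: unknown 0 of each
* grid point is kept coarse on that level and the other two unknowns are
* treated as F-points. */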
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata,
HYPRE_Int block_size,
HYPRE_Int max_num_levels,
HYPRE_Int *lvl_num_coarse_points,
HYPRE_Int **lvl_coarse_indexes,
HYPRE_Int *point_marker_array)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i,j;
HYPRE_Int **block_cf_marker = NULL;
HYPRE_Int *block_num_coarse_indexes = NULL;
/* free block cf_marker data if not previously destroyed */
if((mgr_data -> block_cf_marker) != NULL)
{
for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
{
if ((mgr_data -> block_cf_marker)[i])
{
hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker)[i] = NULL;
}
}
hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
(mgr_data -> block_cf_marker) = NULL;
}
if((mgr_data -> block_num_coarse_indexes))
{
hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> block_num_coarse_indexes) = NULL;
}
/* store block cf_marker */
block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
for (i = 0; i < max_num_levels; i++)
{
block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int));
}
for (i = 0; i < max_num_levels; i++)
{
for(j=0; j<lvl_num_coarse_points[i]; j++)
{
block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
}
}
/* store block_num_coarse_points */
if(max_num_levels > 0)
{
block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
for(i=0; i<max_num_levels; i++)
block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
}
/* set block data */
(mgr_data -> max_num_coarse_levels) = max_num_levels;
(mgr_data -> block_size) = block_size;
(mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
(mgr_data -> block_cf_marker) = block_cf_marker;
(mgr_data -> point_marker_array) = point_marker_array;
(mgr_data -> set_c_points_method) = 2;
return hypre_error_flag;
}
/* Set the number of points that remain part of the coarse grid throughout the hierarchy */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata,
HYPRE_Int reserved_coarse_size,
HYPRE_BigInt *reserved_cpt_index)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_BigInt *reserved_coarse_indexes = NULL;
HYPRE_Int i;
if (!mgr_data)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n");
return hypre_error_flag;
}
if(reserved_coarse_size < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
/* free data not previously destroyed */
if((mgr_data -> reserved_coarse_indexes))
{
hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
(mgr_data -> reserved_coarse_indexes) = NULL;
}
/* set reserved coarse nodes */
if(reserved_coarse_size > 0)
{
reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
for(i=0; i<reserved_coarse_size; i++)
reserved_coarse_indexes[i] = reserved_cpt_index[i];
}
(mgr_data -> reserved_coarse_size) = reserved_coarse_size;
(mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;
return hypre_error_flag;
}
/* Set CF marker array */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int fixed_coarse_size,
HYPRE_Int *fixed_coarse_indexes,
HYPRE_Int debug_flag,
HYPRE_Int **CF_marker_ptr,
HYPRE_Int cflag)
{
HYPRE_Int *CF_marker = NULL;
HYPRE_Int *cindexes = fixed_coarse_indexes;
HYPRE_Int i, row, nc;
HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* If this is the last level, coarsen onto fixed coarse set */
if(cflag)
{
if(*CF_marker_ptr != NULL)
{
hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST);
}
CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST);
/* avoid memset on HYPRE_Int data (it writes bytes); set each entry explicitly */
for (row = 0; row < nloc; row++)
{
CF_marker[row] = FMRK;
}
/* first mark fixed coarse set */
nc = fixed_coarse_size;
for(i = 0; i < nc; i++)
{
CF_marker[cindexes[i]] = CMRK;
}
}
else
{
/* First coarsen to get an initial CF splitting.
* This is then followed by updating the CF marker to pass
* coarse information to the next levels. NOTE: It may be
* convenient to implement it this way (it allows the use of
* multiple coarsening strategies without changing too much
* code), but it is not necessarily the best option, compared
* to initializing CF_marker first and then coarsening on the
* subgraph that excludes the initialized coarse nodes.
*/
hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker);
/* Update CF_marker to correct Cpoints marked as Fpoints. */
nc = fixed_coarse_size;
for(i = 0; i < nc; i++)
{
CF_marker[cindexes[i]] = CMRK;
}
/* Set F-points to FMRK. This is necessary since the different coarsening
* schemes differentiate between types of F-points (e.g., Ruge coarsening).
* We do not need that distinction here.
*/
for (row = 0; row <nloc; row++)
{
if(CF_marker[row] == CMRK) continue;
CF_marker[row] = FMRK;
}
#if 0
/* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
* in the next level.
*/
nc = 0;
index_i = 0;
for (row = 0; row <nloc; row++)
{
/* loop through new c-points */
if(CF_marker[row] == CMRK) nc++;
else if(CF_marker[row] == S_CMRK)
{
/* previously marked c-point is part of fixed coarse set. Track its current local index */
cindexes[index_i++] = nc;
/* reset c-point from S_CMRK to CMRK */
CF_marker[row] = CMRK;
nc++;
}
/* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
* between type of F-points (example Ruge coarsening). We do not need that distinction here.
*/
else
{
CF_marker[row] = FMRK;
}
}
/* check if this should be last level */
if( nc == fixed_coarse_size)
last_level = 1;
//printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
}
/* set CF_marker */
*CF_marker_ptr = CF_marker;
return hypre_error_flag;
}
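/* Minimal caller sketch (assumptions: the S, A, fixed_coarse_* arguments come
 * from the surrounding MGR setup, and CMRK is the coarse marker value used
 * above). It shows how the CF_marker returned by hypre_MGRCoarsen is
 * typically consumed, here just to count the local coarse points. */
#if 0
static HYPRE_Int
example_count_coarse_points(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A,
                            HYPRE_Int fixed_coarse_size,
                            HYPRE_Int *fixed_coarse_indexes)
{
   HYPRE_Int *CF_marker = NULL;
   HYPRE_Int  i, nc = 0;
   HYPRE_Int  nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   hypre_MGRCoarsen(S, A, fixed_coarse_size, fixed_coarse_indexes,
                    0 /* debug_flag */, &CF_marker, 0 /* not last level */);
   for (i = 0; i < nloc; i++)
   {
      if (CF_marker[i] == CMRK) { nc++; }
   }
   hypre_TFree(CF_marker, HYPRE_MEMORY_HOST);
   return nc;
}
#endif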
/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int method,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ((CF_marker[i1] >= 0) && (method > 0))
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Initialize counters for the fill pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
fine_to_coarse[i] += coarse_shift;
}
}
/* index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
big_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
}
comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] < 0)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ( i==i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0/A_diag_data[jj];
}
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if ((CF_marker[i1] >= 0) && (method > 0))
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
/*
if(method == 0)
{
P_diag_data[jj_counter] = 0.0;
}
*/
if (method == 1)
{
P_diag_data[jj_counter] = - A_diag_data[jj];
}
else if (method == 2)
{
P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
}
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if ((CF_marker_offd[i1] >= 0) && (method > 0))
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
/*
if(method == 0)
{
P_offd_data[jj_counter_offd] = 0.0;
}
*/
if (method == 1)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj];
}
else if (method == 2)
{
P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
}
jj_counter_offd++;
}
}
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
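/* The F-row weights assembled above follow, per F-point i and C-neighbor c:
 *   method 1:  p(i,c) = -a(i,c)
 *   method 2:  p(i,c) = -a(i,c) / a(i,i)
 * i.e. method 2 approximates the (i,c) entry of -A_ff^{-1} A_fc with
 * A_ff ~ diag(A); method 0 leaves F-rows empty (injection). A scalar sketch
 * of the method-2 weight (function name is illustrative): */
#if 0
static HYPRE_Real
example_method2_weight(HYPRE_Real a_ic, HYPRE_Real a_ii)
{
   return -a_ic / a_ii;   /* Jacobi-style approximation of -A_ff^{-1} A_fc */
}
#endif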
/* Interpolation for MGR - Dynamic Row Sum method */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Real *a_diag;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *tmp_map_offd = NULL; /* must be NULL if P_offd_size == 0, since it is freed unconditionally below */
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
// HYPRE_Int jj_begin_row,jj_begin_row_offd;
// HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
//HYPRE_BigInt *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
//HYPRE_BigInt my_first_cpt;
HYPRE_Int num_cols_P_offd;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
else
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
/*--------------------------------------------------------------------
* Set up the indexes for the DRS method
*--------------------------------------------------------------------*/
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Initialize counters for the fill pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
//fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
/*index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
//for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if ( i==i1 ) /* diagonal of A only */
{
a_diag[i] = 1.0/A_diag_data[jj];
}
}
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
jj_counter++;
}
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
jj_counter_offd++;
}
}
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
hypre_TFree(a_diag, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
// hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
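/* Both interpolation builders above split the fine rows with the same
 * balanced partition: the first `rest` threads receive size+1 rows, the
 * remaining threads receive size. A self-contained sketch of that [ns, ne)
 * computation (helper name is illustrative): */
#if 0
static void
example_thread_range(HYPRE_Int n, HYPRE_Int nthreads, HYPRE_Int tid,
                     HYPRE_Int *ns, HYPRE_Int *ne)
{
   HYPRE_Int size = n / nthreads;
   HYPRE_Int rest = n - size * nthreads;
   if (tid < rest)
   {
      *ns = tid * (size + 1);        /* same as tid*size + tid above */
      *ne = *ns + size + 1;
   }
   else
   {
      *ns = tid * size + rest;
      *ne = *ns + size;
   }
}
#endif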
/* Left-scale a ParCSR matrix: A = diag(vector) * A,
 * i.e. row i of A (diag and offd parts) is multiplied by vector[i].
 * vector: array of local row scaling factors (length = local number of rows)
 * A: the target ParCSR matrix
 */
HYPRE_Int
hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector,
hypre_ParCSRMatrix *A)
{
HYPRE_Int i, j, n_local;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
n_local = hypre_CSRMatrixNumRows(A_diag);
for (i = 0; i < n_local; i++)
{
HYPRE_Real factor = vector[i];
for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
{
A_diag_data[j] *= factor;
}
for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
{
A_offd_data[j] *= factor;
}
}
return hypre_error_flag;
}
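/* Usage sketch for the left scaling above: Jacobi-scale A in place, i.e.
 * A <- diag(A)^{-1} * A. The helper name is illustrative; the diagonal
 * extraction mirrors the a_diag loops in the interpolation builders. */
#if 0
static void
example_jacobi_left_scale(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag      = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j    = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int        n_local     = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        i, jj;
   HYPRE_Real      *dinv = hypre_CTAlloc(HYPRE_Real, n_local, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_local; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
      {
         if (A_diag_j[jj] == i) { dinv[i] = 1.0 / A_diag_data[jj]; }
      }
   }
   hypre_ParCSRMatrixLeftScale(dinv, A);
   hypre_TFree(dinv, HYPRE_MEMORY_HOST);
}
#endif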
/************************************************************
* Available methods:
* 0: inv(A_FF) approximated by its diagonal inverse
* 1: inv(A_FF) approximated by sparse approximate inverse
*************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
hypre_ParCSRMatrix *RT,
HYPRE_Int bsize,
HYPRE_Int ordering,
HYPRE_Int method,
HYPRE_Int Pmax,
HYPRE_Int keep_stencil,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix **A_h_ptr)
{
HYPRE_Int *c_marker, *f_marker;
HYPRE_Int n_local_fine_grid, i, i1, jj;
hypre_ParCSRMatrix *A_cc;
hypre_ParCSRMatrix *A_ff;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *A_cf;
hypre_ParCSRMatrix *A_h;
hypre_ParCSRMatrix *A_h_correction;
HYPRE_Int max_elmts = Pmax;
// HYPRE_Real wall_time = 0.;
hypre_ParCSRMatrix *P_mod = NULL;
HYPRE_Int my_id;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_MPI_Comm_rank(comm,&my_id);
n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fine_grid; i++)
{
HYPRE_Int point_type = CF_marker[i];
hypre_assert(point_type == 1 || point_type == -1);
c_marker[i] = point_type;
f_marker[i] = -point_type;
}
// get the A_cc sub-block
hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);
if (method == 0)
{
if (keep_stencil)
{
//wall_time = time_getWallclockSeconds();
hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
// extract the diagonal of A_ff and compute D_ff_inv
hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff);
HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag);
HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag);
HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag);
HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag);
HYPRE_Real *D_ff_inv;
D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fpoints; i++)
{
for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++)
{
i1 = A_ff_diag_j[jj];
if ( i==i1 )
{
D_ff_inv[i] = -1.0/A_ff_diag_data[jj];
}
}
}
// extract the diagonal of A_cf
hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf);
HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag);
HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag);
HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag);
n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag);
HYPRE_Real *D_cf;
D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
for (i = 0; i < n_local_fpoints; i++)
{
/* the diagonal coefficient of row i of A_cf is its first stored entry;
 * the original code indexed A_cf_diag_data with a stale jj left over
 * from the previous loop, reading the wrong entry */
D_cf[i] = A_cf_diag_data[A_cf_diag_i[i]];
}
// compute the triple product
hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc);
hypre_ParCSRMatrixLeftScale(D_cf, A_fc);
A_h_correction = A_fc;
hypre_TFree(D_cf, HYPRE_MEMORY_HOST);
hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_cf);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time);
}
else
{
//wall_time = time_getWallclockSeconds();
P_mod = hypre_ParCSRMatrixCompleteClone(P);
hypre_ParCSRMatrixCopy(P,P_mod,1);
HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod);
hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod);
HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag);
HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);
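/* Zero the first stored entry of each C-point row of P_mod. For C-rows the
 * interpolation is the identity, and that single coefficient is stored first
 * in the row (assumption relied upon here), so RT * A * P_mod produces only
 * the F-point correction term of the triple product. */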
for (i = 0; i < n_local_rows; i ++)
{
if (CF_marker[i] >= 0)
{
HYPRE_Int ii = P_mod_diag_i[i];
P_mod_diag_data[ii] = 0.0;
}
}
hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Compute triple product time new: %1.5f\n", wall_time);
hypre_ParCSRMatrixDestroy(P_mod);
}
}
else
{
// Approximate inverse for ideal interpolation
hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);
hypre_ParCSRMatrix *A_ff_inv = NULL;
hypre_ParCSRMatrix *minus_Wp = NULL;
hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
A_h_correction = hypre_ParMatmul(A_cf, minus_Wp);
hypre_ParCSRMatrixDestroy(minus_Wp);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(A_cf);
}
// perform dropping for A_h_correction
// specific to multiphase poromechanics
// we only keep the diagonal of each block
//wall_time = time_getWallclockSeconds();
HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction));
hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction);
HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag);
HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag);
HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag);
hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction);
HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd);
HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd);
// Allow for maximum dropping with Pmax = 0
//if (Pmax > 0)
//{
if (ordering == 0) // interleaved ordering
{
HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST);
HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST);
HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST);
HYPRE_Int num_nonzeros_diag_new = 0;
HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST);
HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST);
HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST);
HYPRE_Int num_nonzeros_offd_new = 0;
for (i = 0; i < n_local_cpoints; i++)
{
HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i];
HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);
HYPRE_Int row_start = i - (i % bsize);
HYPRE_Int row_stop = row_start + bsize - 1;
HYPRE_Int cnt = 0;
for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++)
{
aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
aux_data[cnt] = A_h_correction_offd_data[jj];
cnt++;
}
for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++)
{
aux_j[cnt] = A_h_correction_diag_j[jj];
aux_data[cnt] = A_h_correction_diag_data[jj];
cnt++;
}
hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1);
for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++)
{
i1 = A_h_correction_diag_j[jj];
if (i1 >= row_start && i1 <= row_stop)
{
// copy data to new arrays
A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
++num_nonzeros_diag_new;
}
else
{
// Do nothing
}
}
if (max_elmts > 0)
{
for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
{
HYPRE_Int col_idx = aux_j[jj];
HYPRE_Real col_value = aux_data[jj];
if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
{
A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
++num_nonzeros_diag_new;
}
else if (col_idx >= ncol_diag)
{
A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
++num_nonzeros_offd_new;
}
}
}
A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new;
A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new;
hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(A_h_correction_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_h_correction_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_h_correction_diag_data, HYPRE_MEMORY_HOST);
hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;
if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, HYPRE_MEMORY_HOST);
if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, HYPRE_MEMORY_HOST);
if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, HYPRE_MEMORY_HOST);
hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
}
else
{
/* Block ordering is not implemented. Flag the error through hypre's error
 * handling instead of calling exit() from library code, and release the
 * objects allocated so far. */
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Block ordering is not supported at the moment\n");
hypre_ParCSRMatrixDestroy(A_cc);
hypre_ParCSRMatrixDestroy(A_h_correction);
hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
//}
//hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
//wall_time = time_getWallclockSeconds() - wall_time;
//hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
//hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");
// coarse grid / schur complement
hypre_ParcsrAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
*A_h_ptr = A_h;
//hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");
hypre_ParCSRMatrixDestroy(A_cc);
hypre_ParCSRMatrixDestroy(A_h_correction);
hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
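/* The coarse grid assembled above is an approximate Schur complement,
 *   A_h = A_cc - A_cf * inv(A_ff) * A_fc,
 * with inv(A_ff) replaced by diag(A_ff)^{-1} (method 0) or a sparse
 * approximate inverse (method 1). Scalar sketch of the same identity
 * (function name is illustrative): */
#if 0
static HYPRE_Real
example_scalar_schur(HYPRE_Real a_cc, HYPRE_Real a_cf,
                     HYPRE_Real a_ff, HYPRE_Real a_fc)
{
   return a_cc - a_cf * (1.0 / a_ff) * a_fc;
}
#endif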
HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
HYPRE_BigInt *mgr_idx_array,
HYPRE_Solver A_ff_solver)
{
HYPRE_Int *U_marker, *S_marker, *P_marker;
HYPRE_Int n_fine, i;
HYPRE_BigInt ibegin;
hypre_ParCSRMatrix *A_up;
hypre_ParCSRMatrix *A_uu;
hypre_ParCSRMatrix *A_su;
hypre_ParCSRMatrix *A_pu;
hypre_ParVector *e1_vector;
hypre_ParVector *e2_vector;
hypre_ParVector *e3_vector;
hypre_ParVector *e4_vector;
hypre_ParVector *e5_vector;
n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
hypre_assert(ibegin == mgr_idx_array[0]);
U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
U_marker[i] = -1;
S_marker[i] = -1;
P_marker[i] = -1;
}
// create C and F markers
for (i = 0; i < n_fine; i++)
{
if (i < mgr_idx_array[1] - ibegin)
{
U_marker[i] = 1;
}
else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
{
S_marker[i] = 1;
}
else
{
P_marker[i] = 1;
}
}
// Get A_up
hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
// Get A_uu
hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
// Get A_su
hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su);
// Get A_pu
hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu);
e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up),
hypre_ParCSRMatrixGlobalNumCols(A_up),
hypre_ParCSRMatrixColStarts(A_up));
hypre_ParVectorInitialize(e1_vector);
hypre_ParVectorSetPartitioningOwner(e1_vector,0);
hypre_ParVectorSetConstantValues(e1_vector, 1.0);
e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
hypre_ParCSRMatrixGlobalNumRows(A_uu),
hypre_ParCSRMatrixRowStarts(A_uu));
hypre_ParVectorInitialize(e2_vector);
hypre_ParVectorSetPartitioningOwner(e2_vector,0);
hypre_ParVectorSetConstantValues(e2_vector, 0.0);
e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu),
hypre_ParCSRMatrixGlobalNumRows(A_uu),
hypre_ParCSRMatrixRowStarts(A_uu));
hypre_ParVectorInitialize(e3_vector);
hypre_ParVectorSetPartitioningOwner(e3_vector,0);
hypre_ParVectorSetConstantValues(e3_vector, 0.0);
e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su),
hypre_ParCSRMatrixGlobalNumRows(A_su),
hypre_ParCSRMatrixRowStarts(A_su));
hypre_ParVectorInitialize(e4_vector);
hypre_ParVectorSetPartitioningOwner(e4_vector,0);
hypre_ParVectorSetConstantValues(e4_vector, 0.0);
e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu),
hypre_ParCSRMatrixGlobalNumRows(A_pu),
hypre_ParCSRMatrixRowStarts(A_pu));
hypre_ParVectorInitialize(e5_vector);
hypre_ParVectorSetPartitioningOwner(e5_vector,0);
hypre_ParVectorSetConstantValues(e5_vector, 0.0);
// compute e2 = A_up * e1
hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector);
// solve e3 = A_uu^-1 * e2
hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector);
// compute e4 = A_su * e3
hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector);
// print e4
hypre_ParVectorPrintIJ(e4_vector,1,"Dsp");
// compute e5 = A_pu * e3
hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector);
hypre_ParVectorPrintIJ(e5_vector,1,"Dpp");
hypre_ParVectorDestroy(e1_vector);
hypre_ParVectorDestroy(e2_vector);
hypre_ParVectorDestroy(e3_vector);
/* e4 and e5 were previously leaked */
hypre_ParVectorDestroy(e4_vector);
hypre_ParVectorDestroy(e5_vector);
hypre_ParCSRMatrixDestroy(A_uu);
hypre_ParCSRMatrixDestroy(A_up);
hypre_ParCSRMatrixDestroy(A_pu);
hypre_ParCSRMatrixDestroy(A_su);
hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **A_inv)
{
HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
HYPRE_Real mr_tol, nsh_tol;
HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrix *approx_A_inv = NULL;
print_level = 0;
nsh_max_iter = 2;
nsh_max_row_nnz = 2; // default 1000
mr_max_iter = 1;
mr_tol = 1.0e-3;
mr_max_row_nnz = 2; // default 800
mr_col_version = 0;
nsh_tol = 1.0e-3;
droptol[0] = 1.0e-2;
droptol[1] = 1.0e-2;
hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz,
nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level);
*A_inv = approx_A_inv;
if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
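/* Usage sketch mirroring the ideal-interpolation path below: approximate
 * inv(A_ff) with the NSH inverse above, then form -Wp = inv(A_ff) * A_fc.
 * The helper name and the caller-provided A_ff / A_fc blocks are assumptions
 * of the example. */
#if 0
static HYPRE_Int
example_minus_Wp(hypre_ParCSRMatrix *A_ff, hypre_ParCSRMatrix *A_fc,
                 hypre_ParCSRMatrix **minus_Wp_ptr)
{
   hypre_ParCSRMatrix *A_ff_inv = NULL;
   hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
   *minus_Wp_ptr = hypre_ParMatmul(A_ff_inv, A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   return hypre_error_flag;
}
#endif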
HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *S,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
HYPRE_Int *C_marker;
HYPRE_Int *F_marker;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *minus_Wp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int coarse_counter;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
// HYPRE_BigInt my_first_cpt;
HYPRE_Int i, jj;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
// HYPRE_Int num_threads;
// HYPRE_Real wall_time; /* for debugging instrumentation */
C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
// create C and F markers
for (i = 0; i < n_fine; i++)
{
C_marker[i] = (CF_marker[i] == 1) ?  1 : -1;
F_marker[i] = (CF_marker[i] == 1) ? -1 :  1;
}
// Get A_FC
hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
// compute -Wp
minus_Wp = hypre_ParMatmul(S, A_fc);
hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
// my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
// my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
HYPRE_Int row_counter = 0;
coarse_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
else
{
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
*--------------------------------------------------------------------*/
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
jj_counter++;
}
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
jj_counter_offd++;
}
}
row_counter++;
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Initialize counters for the fill pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
row_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0) /* C-point test; matches the first pass above */
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
jj_counter++;
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
jj_counter_offd++;
}
}
row_counter++;
}
P_offd_i[i+1] = jj_counter_offd;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
if (P_offd_size)
{
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
{
col_map_offd_P[i] = col_map_offd_tmp[i];
}
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
*P_ptr = P;
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(minus_Wp);
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **P_ptr)
{
HYPRE_Int *C_marker;
HYPRE_Int *F_marker;
hypre_ParCSRMatrix *A_ff;
hypre_ParCSRMatrix *A_fc;
hypre_ParCSRMatrix *A_ff_inv;
hypre_ParCSRMatrix *minus_Wp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int jj_counter,jj_counter_offd;
//HYPRE_Int jj_begin_row,jj_begin_row_offd;
//HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *fine_to_coarse = NULL;
//HYPRE_Int *coarse_counter;
HYPRE_Int coarse_counter;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
// HYPRE_BigInt my_first_cpt;
HYPRE_Int i,jj;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
// HYPRE_Int num_threads;
// HYPRE_Real wall_time; /* for debugging instrumentation */
C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
// create C and F markers
for (i = 0; i < n_fine; i++)
{
C_marker[i] = (CF_marker[i] == 1) ?  1 : -1;
F_marker[i] = (CF_marker[i] == 1) ? -1 :  1;
}
// Get A_FF
hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff);
// Get A_FC
hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
hypre_MGRApproximateInverse(A_ff, &A_ff_inv);
/* guard the matrix dumps so they only fire when debugging is requested */
if (debug_flag)
{
hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv");
hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc");
}
minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc);
if (debug_flag)
{
hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp");
}
hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
//hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
//HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
// my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
// my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
//coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
//jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
//jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
HYPRE_Int row_counter = 0;
coarse_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
//jj_count[j]++;
//fine_to_coarse[i] = coarse_counter[j];
//coarse_counter[j]++;
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
else
{
/*--------------------------------------------------------------------
 * If i is an F-point, the interpolation row comes from the (negated)
 * approximation of A_{ff}^{-1} A_{fc}.
 *--------------------------------------------------------------------*/
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
//jj_count[j]++;
jj_counter++;
}
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
//jj_count_offd[j]++;
jj_counter_offd++;
}
}
row_counter++;
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
/*
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
*/
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
*/
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
/*
if (num_procs > 1)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
fine_to_coarse[i] += my_first_cpt;
}
comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(minus_Wp);
comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
}
*/
row_counter = 0;
for (i = 0; i < n_fine; i++)
{
/*--------------------------------------------------------------------
 * If i is a C-point, interpolation is the identity.
 * (Use the same C-point test as in the first pass.)
 *--------------------------------------------------------------------*/
if (CF_marker[i] > 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++)
{
//P_marker[row_counter] = jj_counter;
P_diag_j[jj_counter] = minus_Wp_diag_j[jj];
P_diag_data[jj_counter] = - minus_Wp_diag_data[jj];
jj_counter++;
}
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++)
{
//P_marker_offd[row_counter] = jj_counter_offd;
P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj];
P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj];
jj_counter_offd++;
}
}
row_counter++;
}
P_offd_i[i+1] = jj_counter_offd;
}
//hypre_printf("Num rows of Wp = %d\n", row_counter);
//P_offd_i[row_counter] = jj_counter_offd;
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd);
HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp);
if (P_offd_size)
{
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
{
col_map_offd_P[i] = col_map_offd_tmp[i];
}
}
/*
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_minus_Wp_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
*/
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_MatvecCommPkgCreate(P);
//hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
//hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
//hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
//if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
//hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
//hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
//hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixDestroy(A_ff);
hypre_ParCSRMatrixDestroy(A_fc);
hypre_ParCSRMatrixDestroy(A_ff_inv);
hypre_ParCSRMatrixDestroy(minus_Wp);
return 0;
}
/* Setup interpolation operator */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P,
HYPRE_Int interp_type,
HYPRE_Int numsweeps)
{
//HYPRE_Int i;
hypre_ParCSRMatrix *P_ptr = NULL;
//HYPRE_Real jac_trunc_threshold = trunc_factor;
//HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
/* Interpolation for each level */
if (interp_type < 3)
{
   hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type, debug_flag, &P_ptr);
/* Could do a few sweeps of Jacobi to further improve P */
//for(i=0; i<numsweeps; i++)
// hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
//hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
}
else if (interp_type == 4)
{
hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr);
hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
}
else if (interp_type == 99)
{
hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr);
hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
}
else
{
/* Classical modified interpolation */
hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag,
trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr);
/* Do k sweeps of Jacobi to build W for P = [-W; I].
 * Note that BoomerAMGJacobiInterp assumes an initial P,
 * hence P must be initialized as above before calling this routine.
 * If numsweeps = 0, the following step is skipped and P is returned as is.
 * Looping here is equivalent to improving P by Jacobi interpolation.
 */
//for(i=0; i<numsweeps; i++)
// hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker,
// 0, jac_trunc_threshold,
// jac_trunc_threshold_minus );
}
/* set pointer to P */
*P = P_ptr;
return hypre_error_flag;
}
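/* Summary of the interp_type dispatch used above (as implemented here;
 * the labels are informal):
 *   < 3  : hypre_MGRBuildP (variant chosen by interp_type)
 *   == 4 : approximate-inverse interpolation, then truncation
 *   == 99: extended approximate-inverse interpolation, then truncation
 *   else : classical modified interpolation (hypre_BoomerAMGBuildInterp)
 */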
/* Setup restriction operator */
HYPRE_Int
hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Real S_commpkg_switch,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
hypre_ParCSRMatrix **R,
HYPRE_Int restrict_type,
HYPRE_Int numsweeps)
{
// HYPRE_Int i;
hypre_ParCSRMatrix *R_ptr = NULL;
hypre_ParCSRMatrix *AT = NULL;
hypre_ParCSRMatrix *ST = NULL;
HYPRE_Int *col_offd_ST_to_AT = NULL;
// HYPRE_Real jac_trunc_threshold = trunc_factor;
// HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
/* Build AT (transpose A) */
if (restrict_type > 0)
{
hypre_ParCSRMatrixTranspose(A, &AT, 1);
}
if (restrict_type > 5)
{
/* Build new strength matrix */
hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST);
/* use appropriate communication package for Strength matrix */
if (strong_threshold > S_commpkg_switch)
hypre_BoomerAMGCreateSCommPkg(AT, ST, &col_offd_ST_to_AT);
}
/* Interpolation for each level */
if (restrict_type == 0)
{
hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
}
else if (restrict_type == 1 || restrict_type == 2)
{
hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
/* Could do a few sweeps of Jacobi to further improve P */
//for(i=0; i<numsweeps; i++)
// hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
//hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
}
else if (restrict_type == 4)
{
hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &R_ptr);
hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
}
else
{
/* Classical modified interpolation */
hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag,
trunc_factor, max_elmts, col_offd_ST_to_AT, &R_ptr);
/* Do k sweeps of Jacobi to build W for P = [-W; I].
 * Note that BoomerAMGJacobiInterp assumes an initial P,
 * hence P must be initialized as above before calling this routine.
 * If numsweeps = 0, the following step is skipped and P is returned as is.
 * Looping here is equivalent to improving P by Jacobi interpolation.
 */
// for(i=0; i<numsweeps; i++)
// hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0,
// jac_trunc_threshold, jac_trunc_threshold_minus);
}
/* set pointer to P */
*R = R_ptr;
/* Free memory */
if (restrict_type > 0)
{
hypre_ParCSRMatrixDestroy(AT);
}
if (restrict_type > 5)
{
hypre_ParCSRMatrixDestroy(ST);
if (col_offd_ST_to_AT) hypre_TFree(col_offd_ST_to_AT, HYPRE_MEMORY_HOST);
}
return hypre_error_flag;
}
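/* Summary of the restrict_type dispatch used above (as implemented here;
 * the labels are informal):
 *   == 0   : hypre_MGRBuildP on A
 *   == 1, 2: hypre_MGRBuildP on A^T
 *   == 4   : approximate-inverse operator, then truncation
 *   else   : classical modified interpolation on A^T (a strength matrix
 *            S^T is built first when restrict_type > 5)
 */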
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3];
const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7];
const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11];
const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];
const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;
const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41;
HYPRE_Real det_inv;
/* Optionally guard against a nearly singular matrix here:
   if (fabs(det) < 1e-22) { warn and/or regularize before dividing } */
det_inv = 1.0/det;
a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv;
a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv;
a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv;
a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv;
}
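/* Illustrative check (hypothetical, not called anywhere in hypre): inverting
 * a diagonal 4x4 matrix in place with the cofactor formulas above should
 * yield the reciprocal diagonal. Only hypre_blas_smat_inv_n4 and its
 * row-major layout come from this file; the rest of the sketch is made up. */
#if 0
static void example_smat_inv_n4 (void)
{
   HYPRE_Real a[16] = { 2.0, 0.0, 0.0, 0.0,
                        0.0, 4.0, 0.0, 0.0,
                        0.0, 0.0, 5.0, 0.0,
                        0.0, 0.0, 0.0, 8.0 };
   hypre_blas_smat_inv_n4(a);
   /* now a = diag(0.5, 0.25, 0.2, 0.125), stored row-major */
}
#endif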
void hypre_blas_mat_inv(HYPRE_Real *a,
HYPRE_Int n)
{
HYPRE_Int i,j,k,l,u,kn,in;
HYPRE_Real alinv;
if (n == 4)
{
hypre_blas_smat_inv_n4(a);
}
else
{
for (k=0; k<n; ++k) {
kn = k*n;
l = kn+k;
//if (fabs(a[l]) < SMALLREAL) {
// printf("### WARNING: Diagonal entry is close to zero!");
// printf("### WARNING: diag_%d=%e\n", k, a[l]);
// a[l] = SMALLREAL;
//}
alinv = 1.0/a[l];
a[l] = alinv;
for (j=0; j<k; ++j) {
u = kn+j; a[u] *= alinv;
}
for (j=k+1; j<n; ++j) {
u = kn+j; a[u] *= alinv;
}
for (i=0; i<k; ++i) {
in = i*n;
for (j=0; j<n; ++j)
if (j!=k) {
u = in+j; a[u] -= a[in+k]*a[kn+j];
} // end if (j!=k)
}
for (i=k+1; i<n; ++i) {
in = i*n;
for (j=0; j<n; ++j)
if (j!=k) {
u = in+j; a[u] -= a[in+k]*a[kn+j];
} // end if (j!=k)
}
for (i=0; i<k; ++i) {
u=i*n+k; a[u] *= -alinv;
}
for (i=k+1; i<n; ++i) {
u=i*n+k; a[u] *= -alinv;
}
} // end for (k=0; k<n; ++k)
}// end if
}
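/* Illustrative check for the general Gauss-Jordan path (n != 4); a
 * hypothetical sketch, not part of hypre. For [[4,3],[6,3]] the inverse is
 * [[-1/2, 1/2],[1, -2/3]], which the in-place elimination above reproduces. */
#if 0
static void example_mat_inv_2x2 (void)
{
   HYPRE_Real a[4] = { 4.0, 3.0,
                       6.0, 3.0 };
   hypre_blas_mat_inv(a, 2);
   /* a is overwritten with the inverse, row-major:
      a = { -0.5, 0.5, 1.0, -2.0/3.0 } */
}
#endif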
HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr,
void *mgr_vdata, HYPRE_Int debug_flag)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int num_procs, my_id;
HYPRE_Int blk_size = (mgr_data -> block_size);
HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_ParCSRMatrix *B;
hypre_CSRMatrix *B_diag;
HYPRE_Real *B_diag_data;
HYPRE_Int *B_diag_i;
HYPRE_Int *B_diag_j;
hypre_CSRMatrix *B_offd;
HYPRE_Int i,ii;
HYPRE_Int j,jj;
HYPRE_Int k;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int n_block, left_size,inv_size;
// HYPRE_Real wall_time; /* for debugging instrumentation */
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Real * diaginv;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int block_scaling_error = 0;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
//printf("n = %d\n",n);
if (my_id == num_procs)
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
//printf("inv_size = %d\n",inv_size);
hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv));
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
* First Pass: Determine size of B and fill in
*-----------------------------------------------------------------------*/
B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
B_diag_i[n] = inv_size;
//B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
//B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
//B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
//B_offd_i[n] = 1;
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
//printf("n_block = %d\n",n_block);
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
/* (debug) the current diagonal block could be printed here */
hypre_blas_mat_inv(diaginv, blk_size);
for (k = 0;k < blk_size; k++)
{
B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size;
//B_offd_i[i*nb2+k] = 0;
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
B_diag_j[bidx] = i*blk_size + j;
B_diag_data[bidx] = diaginv[k*blk_size + j];
}
}
}
//printf("Before create\n");
B = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
0,
inv_size,
0);
//printf("After create\n");
B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrixData(B_diag) = B_diag_data;
hypre_CSRMatrixI(B_diag) = B_diag_i;
hypre_CSRMatrixJ(B_diag) = B_diag_j;
B_offd = hypre_ParCSRMatrixOffd(B);
hypre_CSRMatrixData(B_offd) = NULL;
hypre_CSRMatrixI(B_offd) = NULL;
hypre_CSRMatrixJ(B_offd) = NULL;
/* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */
/* FIX-ME: rows beyond n_block*blk_size (when left_size > 0) are never
   filled in B, leaving those B_diag_i entries at zero */
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);  /* scratch block, already copied into B */
*B_ptr = B;
return(block_scaling_error);
}
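/* Hedged usage sketch (hypothetical driver, not a hypre API): given an MGR
 * data structure whose block_size has been set, build the block-Jacobi
 * scaling matrix B whose diagonal part holds blockdiag(A_diag)^{-1}. */
#if 0
{
   hypre_ParCSRMatrix *B = NULL;
   /* mgr_vdata must point to a hypre_ParMGRData with block_size (and
      reserved_coarse_size) already set, e.g. via hypre_MGRSetBlockSize. */
   hypre_block_jacobi_scaling(A, &B, mgr_vdata, /* debug_flag = */ 0);
   /* ... use B, e.g. form hypre_ParMatmul(B, A), then clean up ... */
   hypre_ParCSRMatrixDestroy(B);
}
#endif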
HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Real blk_size,
HYPRE_Int n_block,
HYPRE_Int left_size,
HYPRE_Int method,
HYPRE_Real *diaginv,
hypre_ParVector *Vtemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j, k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidx1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id;
HYPRE_Real *res;
const HYPRE_Int nb2 = blk_size*blk_size;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
// HYPRE_Int num_threads = hypre_NumThreads();
res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
//printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax points block by block
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*blk_size +j;
res[j] = f_data[bidx];
for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
{
ii = A_diag_j[jj];
if (method == 0)
{
// Jacobi for diagonal part
res[j] -= A_diag_data[jj] * Vtemp_data[ii];
}
else if (method == 1)
{
// Gauss-Seidel for diagonal part
res[j] -= A_diag_data[jj] * u_data[ii];
}
else
{
// Default do Jacobi for diagonal part
res[j] -= A_diag_data[jj] * Vtemp_data[ii];
}
//printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
}
for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
{
// always do Jacobi for off-diagonal part
ii = A_offd_j[jj];
res[j] -= A_offd_data[jj] * Vext_data[ii];
}
//printf("%d: res = %e\n",bidx,res[j]);
}
for (j = 0;j < blk_size; j++)
{
bidx1 = i*blk_size +j;
for (k = 0;k < blk_size; k++)
{
bidx = i*nb2 +j*blk_size+k;
u_data[bidx1] += res[k]*diaginv[bidx];
//printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
}
//printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(res, HYPRE_MEMORY_HOST);
return(relax_error);
}
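/* The loop above implements one sweep of block relaxation: for each block b
 * of size blk_size, with D_b^{-1} the pre-inverted diagonal sub-block stored
 * in diaginv,
 *
 *     r_b = f_b - A u      (u taken from Vtemp for Jacobi, method 0, or from
 *                           the current u for Gauss-Seidel, method 1;
 *                           off-process columns always use the old values
 *                           in Vext_data)
 *     u_b = u_b + D_b^{-1} r_b
 */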
HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Real blk_size,
HYPRE_Int n_block,
HYPRE_Int left_size,
HYPRE_Real *diaginv,
hypre_ParVector *Vtemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
HYPRE_Real *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
HYPRE_Real *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
HYPRE_Real *Vext_data = NULL;
HYPRE_Real *v_buf_data;
HYPRE_Int i, j, k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidx1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_sends;
HYPRE_Int index, start;
HYPRE_Int num_procs, my_id;
HYPRE_Real *res;
const HYPRE_Int nb2 = blk_size*blk_size;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//HYPRE_Int num_threads = hypre_NumThreads();
res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(HYPRE_Real,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
* Copy current approximation into temporary vector.
*-----------------------------------------------------------------*/
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
//printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
}
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
* Relax points block by block
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*blk_size +j;
res[j] = f_data[bidx];
for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++)
{
ii = A_diag_j[jj];
//res[j] -= A_diag_data[jj] * Vtemp_data[ii];
//printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]);
res[j] -= A_diag_data[jj] * u_data[ii];
//printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
}
for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++)
{
ii = A_offd_j[jj];
res[j] -= A_offd_data[jj] * Vext_data[ii];
}
//printf("%d: res = %e\n",bidx,res[j]);
}
for (j = 0;j < blk_size; j++)
{
bidx1 = i*blk_size +j;
for (k = 0;k < blk_size; k++)
{
bidx = i*nb2 +j*blk_size+k;
u_data[bidx1] += res[k]*diaginv[bidx];
//printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
}
//printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
}
}
if (num_procs > 1)
{
hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(res, HYPRE_MEMORY_HOST);
return(relax_error);
}
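/* hypre_block_gs above is the Gauss-Seidel specialization of
 * hypre_blockRelax_solve: the local residual always uses the freshly
 * updated u_data, while off-process contributions still use Vext_data. */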
/*Block smoother*/
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Real **diaginvptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int i, j,k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Int num_procs, my_id;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int n_block;
HYPRE_Int left_size,inv_size;
HYPRE_Real *diaginv = *diaginvptr;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//HYPRE_Int num_threads = hypre_NumThreads();
/* assumes the reserved coarse dofs reside on the last rank */
if (my_id == (num_procs - 1))
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
if (diaginv != NULL)
{
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
}
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
//printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = i*nb2 + k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
}
for (i = 0;i < left_size; i++)
{
bidxm1 =n_block*nb2 + i*blk_size;
bidxp1 =n_block*nb2 + (i+1)*blk_size;
for (j = 0;j < left_size; j++)
{
bidx = n_block*nb2 + i*blk_size +j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
{
   jj = A_diag_j[ii];
   /* >= so the first leftover column (and the diagonal of the first
      leftover row) is included */
   if (jj >= n_block*blk_size)
   {
      bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size;
      diaginv[bidx] = A_diag_data[ii];
   }
}
}
/*-----------------------------------------------------------------
* compute the inverses of all the diagonal sub-blocks
*-----------------------------------------------------------------*/
if (blk_size > 1)
{
   for (i = 0; i < n_block; i++)
   {
      hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
   }
   /* the leftover block starts after the n_block full blocks; note that it
      is stored above with row stride blk_size, while hypre_blas_mat_inv
      assumes a dense left_size x left_size layout (FIX-ME) */
   if (left_size > 0)
   {
      hypre_blas_mat_inv(diaginv + n_block*nb2, left_size);
   }
}
else
{
for (i = 0;i < n; i++)
{
// FIX-ME: zero-diagonal should be tested previously
if (fabs(diaginv[i]) < SMALLREAL)
diaginv[i] = 0.0;
else
diaginv[i] = 1.0 / diaginv[i];
}
}
*diaginvptr = diaginv;
return 1;
}
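/* Hedged usage sketch (hypothetical, not a hypre API): the setup/solve pair
 * amortizes the block inversions across sweeps. hypre_blockRelax below is
 * the one-shot convenience wrapper that sets up, relaxes once, and frees. */
#if 0
{
   HYPRE_Real *diaginv = NULL;
   HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int blk_size = 2, reserved = 0;           /* hypothetical values */
   HYPRE_Int n_block = n / blk_size;
   HYPRE_Int left_size = n - blk_size*n_block;
   HYPRE_Int sweep, nsweeps = 2;                   /* hypothetical */
   hypre_blockRelax_setup(A, blk_size, reserved, &diaginv);
   for (sweep = 0; sweep < nsweeps; sweep++)
   {
      hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size,
                             /* method = */ 0, diaginv, Vtemp);
   }
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
}
#endif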
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int blk_size,
HYPRE_Int reserved_coarse_size,
HYPRE_Int method,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int i, j,k;
HYPRE_Int ii, jj;
HYPRE_Int bidx,bidxm1,bidxp1;
HYPRE_Int relax_error = 0;
HYPRE_Int num_procs, my_id;
const HYPRE_Int nb2 = blk_size*blk_size;
HYPRE_Int n_block;
HYPRE_Int left_size,inv_size;
HYPRE_Real *diaginv;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//HYPRE_Int num_threads = hypre_NumThreads();
/* assumes the reserved coarse dofs reside on the last rank */
if (my_id == (num_procs - 1))
{
n_block = (n - reserved_coarse_size) / blk_size;
left_size = n - blk_size*n_block;
}
else
{
n_block = n / blk_size;
left_size = n - blk_size*n_block;
}
inv_size = nb2*n_block + left_size*left_size;
diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------
* Get all the diagonal sub-blocks
*-----------------------------------------------------------------*/
for (i = 0;i < n_block; i++)
{
bidxm1 = i*blk_size;
bidxp1 = (i+1)*blk_size;
//printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);
for (k = 0;k < blk_size; k++)
{
for (j = 0;j < blk_size; j++)
{
bidx = i*nb2 + k*blk_size + j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++)
{
jj = A_diag_j[ii];
if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
{
bidx = i*nb2 + k*blk_size + jj - bidxm1;
//printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
diaginv[bidx] = A_diag_data[ii];
}
}
}
}
for (i = 0;i < left_size; i++)
{
bidxm1 =n_block*nb2 + i*blk_size;
bidxp1 =n_block*nb2 + (i+1)*blk_size;
for (j = 0;j < left_size; j++)
{
bidx = n_block*nb2 + i*blk_size +j;
diaginv[bidx] = 0.0;
}
for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++)
{
   jj = A_diag_j[ii];
   /* >= so the first leftover column (and the diagonal of the first
      leftover row) is included */
   if (jj >= n_block*blk_size)
   {
      bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size;
      diaginv[bidx] = A_diag_data[ii];
   }
}
}
/* (debug) the gathered diagonal blocks could be printed here */
/*-----------------------------------------------------------------
* compute the inverses of all the diagonal sub-blocks
*-----------------------------------------------------------------*/
if (blk_size > 1)
{
   for (i = 0; i < n_block; i++)
   {
      hypre_blas_mat_inv(diaginv+i*nb2, blk_size);
   }
   /* the leftover block starts after the n_block full blocks (see the
      stride caveat noted in hypre_blockRelax_setup) */
   if (left_size > 0)
   {
      hypre_blas_mat_inv(diaginv + n_block*nb2, left_size);
   }
   /* (debug) the inverted diagonal blocks could be printed here */
}
else
{
for (i = 0;i < n; i++)
{
// FIX-ME: zero-diagonal should be tested previously
if (fabs(diaginv[i]) < SMALLREAL)
diaginv[i] = 0.0;
else
diaginv[i] = 1.0 / diaginv[i];
}
}
hypre_blockRelax_solve(A,f,u,blk_size,n_block,left_size,method,diaginv,Vtemp);
/*-----------------------------------------------------------------
 * Free temporary memory
 *-----------------------------------------------------------------*/
hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
return(relax_error);
}
/* set fine grid (F-relaxation) solver */
HYPRE_Int
hypre_MGRSetFSolver( void *mgr_vdata,
HYPRE_Int (*fine_grid_solver_solve)(void*,void*,void*,void*),
HYPRE_Int (*fine_grid_solver_setup)(void*,void*,void*,void*),
void *fsolver )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
HYPRE_Solver **aff_solver = (mgr_data -> aff_solver);
if (aff_solver == NULL)
aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST);
/* only allow setting the F-solver for the first level */
aff_solver[0] = (HYPRE_Solver *) fsolver;
(mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve;
(mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup;
(mgr_data -> aff_solver) = aff_solver;
(mgr_data -> use_default_fsolver) = 0;
return hypre_error_flag;
}
/* set coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseSolver( void *mgr_vdata,
HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*),
HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*),
void *coarse_grid_solver )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
(mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
(mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
(mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver;
(mgr_data -> use_default_cgrid_solver) = 0;
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRSetAffInv( void *mgr_vdata,
hypre_ParCSRMatrix *A_ff_inv )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> A_ff_inv) = A_ff_inv;
return hypre_error_flag;
}
/* Set the maximum number of coarse levels.
* maxcoarselevs = 1 yields the default 2-grid scheme.
*/
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> max_num_coarse_levels) = maxcoarselevs;
return hypre_error_flag;
}
/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> block_size) = bsize;
return hypre_error_flag;
}
/* Set the relaxation type for the fine levels of the reduction.
* Currently supports the following flavors of relaxation types
* as described in the documentation:
* relax_types 0 - 8, 13, 14, 18, 19, 98.
* See par_relax.c and par_relax_more.c for more details.
*
*/
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> relax_type) = relax_type;
return hypre_error_flag;
}
/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_relax_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set the F-relaxation strategy: 0=single level, 1=multi level */
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> Frelax_method) != NULL) {
hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_method) = NULL;
}
HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
for (i=0; i < max_num_coarse_levels; i++)
{
Frelax_method[i] = relax_method;
}
(mgr_data -> Frelax_method) = Frelax_method;
return hypre_error_flag;
}
/* Set the F-relaxation strategy for each level: 0=single level, 1=multi level */
HYPRE_Int
hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> Frelax_method) != NULL) {
hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_method) = NULL;
}
HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (relax_method != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
Frelax_method[i] = relax_method[i];
}
}
else
{
for (i = 0; i < max_num_coarse_levels; i++)
{
Frelax_method[i] = 0;
}
}
(mgr_data -> Frelax_method) = Frelax_method;
return hypre_error_flag;
}
/* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/
HYPRE_Int
hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> use_non_galerkin_cg) != NULL) {
hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
(mgr_data -> use_non_galerkin_cg) = NULL;
}
HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (cg_method != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
use_non_galerkin_cg[i] = cg_method[i];
}
}
else
{
for (i = 0; i < max_num_coarse_levels; i++)
{
use_non_galerkin_cg[i] = 0;
}
}
(mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg;
return hypre_error_flag;
}
/* Set the F-relaxation number of functions for each level */
HYPRE_Int
hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if((mgr_data -> Frelax_num_functions) != NULL) {
hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
(mgr_data -> Frelax_num_functions) = NULL;
}
HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (num_functions != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
Frelax_num_functions[i] = num_functions[i];
}
}
else
{
for (i = 0; i < max_num_coarse_levels; i++)
{
Frelax_num_functions[i] = 1;
}
}
(mgr_data -> Frelax_num_functions) = Frelax_num_functions;
return hypre_error_flag;
}
/* Set the restriction type per level
 * for computing the restriction operator
 */
HYPRE_Int
hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> restrict_type) != NULL)
{
hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
(mgr_data -> restrict_type) = NULL;
}
HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (restrict_type != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_restrict_type[i] = *(restrict_type + i);
}
}
else
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_restrict_type[i] = 0;
}
}
(mgr_data -> restrict_type) = level_restrict_type;
return hypre_error_flag;
}
/* Set a single restriction type for all levels
 * for computing the restriction operator
 */
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> restrict_type) != NULL)
{
hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
(mgr_data -> restrict_type) = NULL;
}
HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
for (i=0; i < max_num_coarse_levels; i++)
{
level_restrict_type[i] = restrict_type;
}
(mgr_data -> restrict_type) = level_restrict_type;
return hypre_error_flag;
}
/* Set the number of sweeps used when
 * computing the restriction operator
 */
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_restrict_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set a single interpolation type for all levels
 * for computing the interpolation operator
 */
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> interp_type) != NULL)
{
hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
(mgr_data -> interp_type) = NULL;
}
HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
for (i=0; i < max_num_coarse_levels; i++)
{
level_interp_type[i] = interpType;
}
(mgr_data -> interp_type) = level_interp_type;
return hypre_error_flag;
}
/* Set the interpolation type per level
 * for computing the interpolation operator
 */
HYPRE_Int
hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
if ((mgr_data -> interp_type) != NULL)
{
hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
(mgr_data -> interp_type) = NULL;
}
HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
if (interpType != NULL)
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_interp_type[i] = *(interpType + i);
}
}
else
{
for (i=0; i < max_num_coarse_levels; i++)
{
level_interp_type[i] = 2;
}
}
(mgr_data -> interp_type) = level_interp_type;
return hypre_error_flag;
}
/* Set the number of Jacobi interpolation iterations
* for computing interpolation operator
*/
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> num_interp_sweeps) = nsweeps;
return hypre_error_flag;
}
/* Set print level for mgr solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> print_level) = print_level;
return hypre_error_flag;
}
/* Set logging level for mgr solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> logging) = logging;
return hypre_error_flag;
}
/* Set max number of iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> max_iter) = max_iter;
return hypre_error_flag;
}
/* Set convergence tolerance for mgr solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> tol) = tol;
return hypre_error_flag;
}
/* Set max number of iterations for mgr global smoother */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> global_smooth_iters) = max_iter;
return hypre_error_flag;
}
/* Set global smoothing type for mgr solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> global_smooth_type) = iter_type;
return hypre_error_flag;
}
/* Set the maximum number of non-zero entries for restriction
and interpolation operator if classical AMG interpolation is used */
HYPRE_Int
hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> P_max_elmts) = P_max_elmts;
return hypre_error_flag;
}
/* Get number of iterations for MGR solver */
HYPRE_Int
hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_iterations = mgr_data->num_iterations;
return hypre_error_flag;
}
/* Get residual norms for MGR solver */
HYPRE_Int
hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*res_norm = mgr_data->final_rel_residual_norm;
return hypre_error_flag;
}
HYPRE_Int
hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*conv_factor = (mgr_data -> cg_convergence_factor);
return hypre_error_flag;
}
/* Extract a sub-block of A (e.g. A_FF or A_FC) selected by row and column CF marker arrays */
HYPRE_Int
hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A,
HYPRE_Int *row_cf_marker,
HYPRE_Int *col_cf_marker,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **A_block_ptr )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
//HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *coarse_dof_func_ptr = NULL;
HYPRE_BigInt *num_row_cpts_global = NULL;
HYPRE_BigInt *num_col_cpts_global = NULL;
hypre_ParCSRMatrix *Ablock;
HYPRE_BigInt *col_map_offd_Ablock;
HYPRE_Int *tmp_map_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
hypre_CSRMatrix *Ablock_diag;
hypre_CSRMatrix *Ablock_offd;
HYPRE_Real *Ablock_diag_data;
HYPRE_Int *Ablock_diag_i;
HYPRE_Int *Ablock_diag_j;
HYPRE_Real *Ablock_offd_data;
HYPRE_Int *Ablock_offd_i;
HYPRE_Int *Ablock_offd_j;
HYPRE_Int Ablock_diag_size, Ablock_offd_size;
HYPRE_Int *Ablock_marker;
HYPRE_Int ii_counter;
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
HYPRE_Int *col_coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_row_cpts;
HYPRE_BigInt total_global_col_cpts;
HYPRE_Int num_cols_Ablock_offd;
// HYPRE_BigInt my_first_row_cpt, my_first_col_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
// HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
//num_threads = hypre_NumThreads();
// Temporary fix, disable threading
// TODO: enable threading
num_threads = 1;
/* get the number of coarse rows */
hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global);
hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST);
coarse_dof_func_ptr = NULL;
//hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]);
#ifdef HYPRE_NO_GLOBAL_PARTITION
// my_first_row_cpt = num_row_cpts_global[0];
if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1];
hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
// my_first_row_cpt = num_row_cpts_global[my_id];
total_global_row_cpts = num_row_cpts_global[num_procs];
#endif
/* get the number of coarse columns */
hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global);
hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST);
coarse_dof_func_ptr = NULL;
//hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]);
#ifdef HYPRE_NO_GLOBAL_PARTITION
// my_first_col_cpt = num_col_cpts_global[0];
if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1];
hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
// my_first_col_cpt = num_col_cpts_global[my_id];
total_global_col_cpts = num_col_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
}
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/*-----------------------------------------------------------------------
* First Pass: Determine size of Ablock and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If row i is selected (marker > 0), loop through its columns and
 * count the selected columns. Also set up the mapping vector.
 *--------------------------------------------------------------------*/
if (col_cf_marker[i] > 0)
{
fine_to_coarse[i] = col_coarse_counter[j];
col_coarse_counter[j]++;
}
if (row_cf_marker[i] > 0)
{
//fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (col_cf_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
coarse_counter[i+1] += coarse_counter[i];
col_coarse_counter[i+1] += col_coarse_counter[i];
}
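/* After this in-place prefix sum each array holds cumulative counts; e.g.
* hypothetical per-thread counts [3, 1, 2] become [3, 4, 6], so entry j-1
* is thread j's starting offset and the last entry is the overall total. */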
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
ii_counter = coarse_counter[i];
Ablock_diag_size = jj_counter;
Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST);
Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, HYPRE_MEMORY_HOST);
Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, HYPRE_MEMORY_HOST);
Ablock_diag_i[ii_counter] = jj_counter;
Ablock_offd_size = jj_counter_offd;
Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST);
Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, HYPRE_MEMORY_HOST);
Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Initialize counters for the second pass.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
//-----------------------------------------------------------------------
// Shift fine_to_coarse by each thread's cumulative coarse-point count.
//-----------------------------------------------------------------------
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = col_coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += coarse_shift;
}
// if (debug_flag==4) wall_time = time_getWallclockSeconds();
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
// for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt;
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE
#endif
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
ii_counter = 0;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a selected row (row_cf_marker > 0), copy its entries in
* selected columns (col_cf_marker > 0) into Ablock.
*--------------------------------------------------------------------*/
if (row_cf_marker[i] > 0)
{
// Diagonal part of Ablock //
Ablock_diag_i[ii_counter] = jj_counter;
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (col_cf_marker[i1] > 0)
{
Ablock_diag_j[jj_counter] = fine_to_coarse[i1];
Ablock_diag_data[jj_counter] = A_diag_data[jj];
jj_counter++;
}
}
// Off-Diagonal part of Ablock //
Ablock_offd_i[ii_counter] = jj_counter_offd;
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
Ablock_offd_j[jj_counter_offd] = i1;
Ablock_offd_data[jj_counter_offd] = A_offd_data[jj];
jj_counter_offd++;
}
}
}
ii_counter++;
}
}
Ablock_offd_i[ii_counter] = jj_counter_offd;
Ablock_diag_i[ii_counter] = jj_counter;
}
Ablock = hypre_ParCSRMatrixCreate(comm,
total_global_row_cpts,
total_global_col_cpts,
num_row_cpts_global,
num_col_cpts_global,
0,
Ablock_diag_i[ii_counter],
Ablock_offd_i[ii_counter]);
Ablock_diag = hypre_ParCSRMatrixDiag(Ablock);
hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data;
hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i;
hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j;
Ablock_offd = hypre_ParCSRMatrixOffd(Ablock);
hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data;
hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i;
hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j;
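/* These assignments hand the hand-built CSR arrays over to Ablock; the
* matrix now owns them and frees them when it is destroyed, which is why
* they are not released in the cleanup below. */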
num_cols_Ablock_offd = 0;
if (Ablock_offd_size)
{
Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < num_cols_A_offd; i++)
Ablock_marker[i] = 0;
num_cols_Ablock_offd = 0;
for (i=0; i < Ablock_offd_size; i++)
{
index = Ablock_offd_j[i];
if (!Ablock_marker[index])
{
num_cols_Ablock_offd++;
Ablock_marker[index] = 1;
}
}
col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i=0; i < num_cols_Ablock_offd; i++)
{
while (Ablock_marker[index]==0) index++;
tmp_map_offd[i] = index++;
}
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i=0; i < Ablock_offd_size; i++)
Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
Ablock_offd_j[i],
num_cols_Ablock_offd);
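/* tmp_map_offd lists, in increasing order, the old off-diagonal column
* indices that actually appear in Ablock; the binary search above rewrites
* each Ablock_offd_j entry as its position in that compacted list. */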
hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_Ablock_offd)
{
hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock;
hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd;
}
hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* Create the assumed partition */
if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(Ablock);
}
#endif
*A_block_ptr= Ablock;
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
return(0);
}
/* Build A_FF matrix from A given a CF_marker array */
HYPRE_Int
hypre_MGRBuildAff( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_Int debug_flag,
hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int i;
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* create a copy of the CF_marker array and switch C-points to F-points */
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
for (i = 0; i < local_numrows; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr);
/* Free copy of CF marker */
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return(0);
}
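/* A minimal usage sketch (hypothetical calling code): F-points typically
* carry CF_marker == -1, so the negated copy makes them positive and the
* hypre_MGRGetSubBlock call above extracts the F-row/F-column block of A:
*
* hypre_ParCSRMatrix *A_ff = NULL;
* hypre_MGRBuildAff(A, CF_marker, 0, &A_ff);
* // ... use A_ff, e.g. as the F-relaxation operator ...
* hypre_ParCSRMatrixDestroy(A_ff);
*/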
/*********************************************************************************
* This routine assumes that 'toVector' is longer than 'fromVector' and that
* CF_marker has the same length as 'toVector', with exactly n entries equal
* to 'point_type', where n is the length of 'fromVector'. At each marked
* position i it updates toVector[i] = b*toVector[i] + a*fromVector[j],
* where j runs over 'fromVector' in order.
*********************************************************************************/
HYPRE_Int
hypre_MGRAddVectorP ( HYPRE_Int *CF_marker,
HYPRE_Int point_type,
HYPRE_Real a,
hypre_ParVector *fromVector,
HYPRE_Real b,
hypre_ParVector **toVector )
{
hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal);
hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector);
HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal);
HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector);
HYPRE_Int i, j;
j = 0;
for (i = 0; i < n; i++) {
if (CF_marker[i] == point_type) {
toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j];
j++;
}
}
return 0;
}
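/* Worked example (hypothetical values): with CF_marker = [1,-1,1,-1,1],
* point_type = 1, and fromVector = [f0,f1,f2], the loop above updates
* to[0] = b*to[0] + a*f0, to[2] = b*to[2] + a*f1, to[4] = b*to[4] + a*f2,
* leaving the unmarked entries of toVector untouched. */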
/*************************************************************************************
* This routine assumes that 'fromVector' is longer than 'toVector' and that
* CF_marker has the same length as 'fromVector', with exactly n entries equal
* to 'point_type', where n is the length of 'toVector'. At each marked
* position i it updates toVector[j] = b*toVector[j] + a*fromVector[i],
* where j runs over 'toVector' in order.
*************************************************************************************/
HYPRE_Int
hypre_MGRAddVectorR ( HYPRE_Int *CF_marker,
HYPRE_Int point_type,
HYPRE_Real a,
hypre_ParVector *fromVector,
HYPRE_Real b,
hypre_ParVector **toVector )
{
hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector);
HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal);
hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector);
HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal);
HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector);
HYPRE_Int i, j;
j = 0;
for (i = 0; i < n; i++) {
if (CF_marker[i] == point_type) {
toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i];
j++;
}
}
return 0;
}
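/* hypre_MGRAddVectorR is the restriction counterpart of hypre_MGRAddVectorP:
* with the same hypothetical CF_marker = [1,-1,1,-1,1] and point_type = 1,
* the loop above gathers the marked entries instead:
* to[0] = b*to[0] + a*from[0], to[1] = b*to[1] + a*from[2],
* to[2] = b*to[2] + a*from[4]. */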
/*
HYPRE_Int
hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions,
HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr,
hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr )
{
HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST);
HYPRE_Int i;
for (i = 0; i < local_num_variables; i++) {
CF_marker_copy[i] = -CF_marker[i];
}
hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr);
hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr);
hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr);
hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST);
return 0;
}
*/
/* Get pointer to coarse grid matrix for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> RAP == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n");
return hypre_error_flag;
}
*RAP = mgr_data->RAP;
return hypre_error_flag;
}
/* Get pointer to coarse grid solution for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> U_array == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
return hypre_error_flag;
}
*sol = mgr_data->U_array[mgr_data->num_coarse_levels];
return hypre_error_flag;
}
/* Get pointer to coarse grid solution for MGR solver */
HYPRE_Int
hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs )
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
if (!mgr_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (mgr_data -> F_array == NULL)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n");
return hypre_error_flag;
}
*rhs = mgr_data->F_array[mgr_data->num_coarse_levels];
return hypre_error_flag;
}
/* Print coarse grid linear system (for debugging)*/
HYPRE_Int
hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
mgr_data->print_coarse_system = print_flag;
return hypre_error_flag;
}
/* Print solver params */
HYPRE_Int
hypre_MGRWriteSolverParams(void *mgr_vdata)
{
hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
HYPRE_Int i, j;
HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
hypre_printf("MGR Setup parameters: \n");
hypre_printf("Block size: %d\n", (mgr_data -> block_size));
hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels));
hypre_printf("Relax type: %d\n", (mgr_data -> relax_type));
hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F));
hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method));
for (i = 0; i < max_num_coarse_levels; i++)
{
hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]);
hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]);
hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]);
hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]);
HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i];
hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points);
hypre_printf("Cpoints indices: ");
for (j = 0; j < lvl_num_coarse_points; j++)
{
if ((mgr_data -> block_cf_marker)[i][j] == 1)
{
hypre_printf("%d ", j);
}
}
hypre_printf("\n");
}
hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size));
hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints));
hypre_printf("\n MGR Solver Parameters: \n");
hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps));
hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps));
hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps));
hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type));
hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters));
hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter));
hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol));
hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver));
if((mgr_data -> use_default_fsolver) >= 0)
{
hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver));
}
return hypre_error_flag;
}
|
par_lr_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#define MAX_C_CONNECTIONS 100
#define HAVE_COMMON_C 1
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildStdInterp
* Comment: The interpolation weighting is controlled by the sep_weight
* variable: sep_weight == 1 separates negative and positive off-diagonal
* entries in the weight formula; otherwise they are summed together.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/* HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
//HYPRE_BigInt *found;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
HYPRE_Int *ihat = NULL;
HYPRE_Int *ihat_offd = NULL;
HYPRE_Int *ipnt = NULL;
HYPRE_Int *ipnt_offd = NULL;
HYPRE_Int strong_f_marker = -2;
/* Interpolation weight variables */
HYPRE_Real *ahat = NULL;
HYPRE_Real *ahat_offd = NULL;
HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
HYPRE_Real diagonal, distribute;
HYPRE_Real alfa = 1.;
HYPRE_Real beta = 1.;
/* Loop variables */
// HYPRE_Int index;
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, j1, jj, kk, k1;
HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
HYPRE_Real wall_1 = 0;
HYPRE_Real wall_2 = 0;
HYPRE_Real wall_3 = 0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off-processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 0);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
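/* After the exchange, A_ext and Sop hold full rows of A and S for the
* off-processor points reachable within two hops (neighbors of neighbors).
* Their column arrays use a mixed encoding: a value in [col_1, col_n) is a
* local diagonal column, while an off-diagonal column with local index loc
* is stored as -(loc+1); the decoding below inverts this with
* loc_col = -big_k1 - 1. */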
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F-point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
/* Initialize ahat, which holds the modified row of A ("a-hat") used in
* the standard interpolation formula. */
if (n_fine)
{
ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST);
ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
ahat[i] = 0;
ihat[i] = -1;
}
for (i = 0; i < full_off_procNodes; i++)
{
P_marker_offd[i] = -1;
ahat_offd[i] = 0;
ihat_offd[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
if (debug_flag==4) wall_time = time_getWallclockSeconds();
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = i1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = k1;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd]=i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] >= 0)
{
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = loc_col;
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(CF_marker_offd[loc_col] >= 0)
{
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_1 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
cnt_c = 0;
cnt_f = jj_end_row-jj_begin_row;
cnt_c_offd = 0;
cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
ihat[i] = cnt_f;
ipnt[cnt_f] = i;
ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
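/* ihat scatters a fine-grid index to its slot in the compressed work
* arrays, ipnt is the inverse map (slot -> fine index), and ahat
* accumulates the modified row values ("a-hat"): C-point entries fill
* slots from 0 upward, while the diagonal and the weak/F-point entries
* fill slots from cnt_f (the number of C-points in this row's pattern). */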
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is direct neighbor */
i1 = A_diag_j[jj];
if (P_marker[i1] != strong_f_marker)
{
indx = ihat[i1];
if (indx > -1)
ahat[indx] += A_diag_data[jj];
else if (P_marker[i1] >= jj_begin_row)
{
ihat[i1] = cnt_c;
ipnt[cnt_c] = i1;
ahat[cnt_c++] += A_diag_data[jj];
}
else if (CF_marker[i1] != -3)
{
ihat[i1] = cnt_f;
ipnt[cnt_f] = i1;
ahat[cnt_f++] += A_diag_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
{
distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
{
k1 = A_diag_j[kk];
indx = ihat[k1];
if (indx > -1)
ahat[indx] -= A_diag_data[kk]*distribute;
else if (P_marker[k1] >= jj_begin_row)
{
ihat[k1] = cnt_c;
ipnt[cnt_c] = k1;
ahat[cnt_c++] -= A_diag_data[kk]*distribute;
}
else
{
ihat[k1] = cnt_f;
ipnt[cnt_f] = k1;
ahat[cnt_f++] -= A_diag_data[kk]*distribute;
}
}
if(num_procs > 1)
{
for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
{
k1 = A_offd_j[kk];
indx = ihat_offd[k1];
if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
{
if (indx > -1)
ahat_offd[indx] -= A_offd_data[kk]*distribute;
else if (P_marker_offd[k1] >= jj_begin_row_offd)
{
ihat_offd[k1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = k1;
ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
}
else
{
ihat_offd[k1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = k1;
ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
}
}
}
}
}
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] != strong_f_marker)
{
indx = ihat_offd[i1];
if (indx > -1)
ahat_offd[indx] += A_offd_data[jj];
else if (P_marker_offd[i1] >= jj_begin_row_offd)
{
ihat_offd[i1] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = i1;
ahat_offd[cnt_c_offd++] += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
ihat_offd[i1] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = i1;
ahat_offd[cnt_f_offd++] += A_offd_data[jj];
}
}
else
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
{
distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /*diag*/
loc_col = (HYPRE_Int)(big_k1 - col_1);
indx = ihat[loc_col];
if (indx > -1)
ahat[indx] -= A_ext_data[kk]*distribute;
else if (P_marker[loc_col] >= jj_begin_row)
{
ihat[loc_col] = cnt_c;
ipnt[cnt_c] = loc_col;
ahat[cnt_c++] -= A_ext_data[kk]*distribute;
}
else
{
ihat[loc_col] = cnt_f;
ipnt[cnt_f] = loc_col;
ahat[cnt_f++] -= A_ext_data[kk]*distribute;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(num_functions == 1 ||
dof_func_offd[loc_col] == dof_func_offd[i1])
{
indx = ihat_offd[loc_col];
if (indx > -1)
ahat_offd[indx] -= A_ext_data[kk]*distribute;
else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
{
ihat_offd[loc_col] = cnt_c_offd;
ipnt_offd[cnt_c_offd] = loc_col;
ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
}
else
{
ihat_offd[loc_col] = cnt_f_offd;
ipnt_offd[cnt_f_offd] = loc_col;
ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_2 += wall_time;
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
diagonal = ahat[cnt_c];
ahat[cnt_c] = 0;
sum_pos = 0;
sum_pos_C = 0;
sum_neg = 0;
sum_neg_C = 0;
sum = 0;
sum_C = 0;
if(sep_weight == 1)
{
for (jj=0; jj < cnt_c; jj++)
{
if (ahat[jj] > 0)
{
sum_pos_C += ahat[jj];
}
else
{
sum_neg_C += ahat[jj];
}
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos_C += ahat_offd[jj];
}
else
{
sum_neg_C += ahat_offd[jj];
}
}
}
sum_pos = sum_pos_C;
sum_neg = sum_neg_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
if (ahat[jj] > 0)
{
sum_pos += ahat[jj];
}
else
{
sum_neg += ahat[jj];
}
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
if (ahat_offd[jj] > 0)
{
sum_pos += ahat_offd[jj];
}
else
{
sum_neg += ahat_offd[jj];
}
ahat_offd[jj] = 0;
}
}
if (sum_neg_C*diagonal != 0) alfa = sum_neg/sum_neg_C/diagonal;
if (sum_pos_C*diagonal != 0) beta = sum_pos/sum_pos_C/diagonal;
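/* In effect, each interpolation weight below becomes
* w_j = -alfa*ahat_j (ahat_j <= 0) or w_j = -beta*ahat_j (ahat_j > 0),
* where alfa = (sum of all negative ahat) / (sum of negative ahat over
* C-points) / diagonal and beta is the analogous positive ratio, so the
* weak and F-point couplings are distributed onto the C-points sign by
* sign. */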
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
if (ahat[j1] > 0)
P_diag_data[jj] = -beta*ahat[j1];
else
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
if (ahat_offd[j1] > 0)
P_offd_data[jj] = -beta*ahat_offd[j1];
else
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
else
{
for (jj=0; jj < cnt_c; jj++)
{
sum_C += ahat[jj];
}
if(num_procs > 1)
{
for (jj=0; jj < cnt_c_offd; jj++)
{
sum_C += ahat_offd[jj];
}
}
sum = sum_C;
for (jj=cnt_c+1; jj < cnt_f; jj++)
{
sum += ahat[jj];
ahat[jj] = 0;
}
if(num_procs > 1)
{
for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
{
sum += ahat_offd[jj];
ahat_offd[jj] = 0;
}
}
if (sum_C*diagonal != 0) alfa = sum/sum_C/diagonal;
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
j1 = ihat[P_diag_j[jj]];
P_diag_data[jj] = -alfa*ahat[j1];
P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
ahat[j1] = 0;
}
for (jj=0; jj < cnt_f; jj++)
ihat[ipnt[jj]] = -1;
if(num_procs > 1)
{
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
j1 = ihat_offd[P_offd_j[jj]];
P_offd_data[jj] = -alfa*ahat_offd[j1];
ahat_offd[j1] = 0;
}
for (jj=0; jj < cnt_f_offd; jj++)
ihat_offd[ipnt_offd[jj]] = -1;
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
wall_3 += wall_time;
fflush(NULL);
}
}
}
if (debug_flag==4)
{
hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n",
my_id, wall_1, wall_2, wall_3);
fflush(NULL);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(ahat, HYPRE_MEMORY_HOST);
hypre_TFree(ihat, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt, HYPRE_MEMORY_HOST);
if (full_off_procNodes)
{
hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST);
hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * diag_offset;
HYPRE_Int * fine_to_coarse_offset;
HYPRE_Int * offd_offset;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off-processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/* This function sets P_marker and P_marker_offd only if they are
* non-NULL (here they still are NULL); the other vectors are set regardless. */
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
/*-----------------------------------------------------------------------
* Initialize threading variables
*-----------------------------------------------------------------------*/
max_num_threads[0] = hypre_NumThreads();
diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for(i=0; i < max_num_threads[0]; i++)
{
diag_offset[i] = 0;
fine_to_coarse_offset[i] = 0;
offd_offset[i] = 0;
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker, big_k1)
#endif
{
/* Parallelize by computing only over each thread's range of rows.
*
* The first large for loop computes, ~locally~ for each thread, P_diag_i,
* P_offd_i and fine_to_coarse. Then, the arrays are stitched together.
* For example, the first phase would compute
* P_diag_i = [0, 2, 4, 7, 2, 5, 6]
* for two threads. P_diag_i[stop] points to the end of that
* thread's data, but P_diag_i[start] points to the end of the
* previous thread's row range. This is then stitched together at the
* end to yield
* P_diag_i = [0, 2, 4, 7, 9, 12, 13].
*
* The second large for loop computes interpolation weights and is
* relatively straightforward to thread.
*/
/* initialize thread-wise variables */
strong_f_marker = -2;
coarse_counter = 0;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (n_fine)
{
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{ P_marker[i] = -1; }
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
for (i = 0; i < full_off_procNodes; i++)
{ P_marker_offd[i] = -1;}
}
/* this thread's row range */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ stop = n_fine; }
else
{ stop = (n_fine/num_threads)*(my_thread_num+1); }
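/* Note: unlike a size/rest split, this scheme gives every thread
* floor(n_fine/num_threads) rows and assigns all leftover rows to the
* last thread. */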
/* loop over rows */
for (i = start; i < stop; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is an F-point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* End loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
P_diag_i[stop] = jj_counter;
P_offd_i[stop] = jj_counter_offd;
fine_to_coarse_offset[my_thread_num] = coarse_counter;
diag_offset[my_thread_num] = jj_counter;
offd_offset[my_thread_num] = jj_counter_offd;
/* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
/* Calculate the offset for P_diag_i and P_offd_i for each thread */
for (i = 1; i < num_threads; i++)
{
diag_offset[i] = diag_offset[i-1] + diag_offset[i];
fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i];
offd_offset[i] = offd_offset[i-1] + offd_offset[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update row pointer array with offset,
* making sure to update the row stop index */
for (i = start+1; i <= stop; i++)
{
P_diag_i[i] += diag_offset[my_thread_num-1];
P_offd_i[i] += offd_offset[my_thread_num-1];
}
/* update fine_to_coarse by offsetting with the offset
* from the preceding thread */
for (i = start; i < stop; i++)
{
if(fine_to_coarse[i] >= 0)
{ fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1]; }
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);
}
}
/* Fine to coarse mapping */
if(num_procs > 1 && my_thread_num == 0)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = start; i < stop; i++)
{
jj_begin_row = P_diag_i[i];
jj_begin_row_offd = P_offd_i[i];
jj_counter = jj_begin_row;
jj_counter_offd = jj_begin_row_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* if i1 is a C-point that strongly influences i, accumulate
* a_(i,i1) into the interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
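/* sgn records the sign of i1's diagonal; only connections whose sign
* opposes that diagonal (sgn*a < 0) enter the sum and the distribution
* below, following the extended+i interpolation rule for splitting an
* F-point's coupling among common C-points. */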
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
/*-----------------------------------------------------------------------
* End large for loop over n_fine
*-----------------------------------------------------------------------*/
if (n_fine)
{ hypre_TFree(P_marker, HYPRE_MEMORY_HOST); }
if (full_off_procNodes)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
}
/*-----------------------------------------------------------------------
* End PAR_REGION
*-----------------------------------------------------------------------*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
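/* CF_marker == -3 marks F-points that this routine skipped when
 * building interpolation; restore them to ordinary F-points (-1)
 * before returning. */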
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(diag_offset, HYPRE_MEMORY_HOST);
hypre_TFree(offd_offset, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse_offset, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPICCInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
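/* Rough sketch of the scheme below: an F-point i interpolates from the
 * C-points that strongly influence it. For each strong F-neighbor i1,
 * both passes first test whether i and i1 share a strongly influencing
 * C-point (the temporary CF_marker == 2 tags); only when no common
 * C-point exists is the interpolation set extended with the C-points
 * that strongly influence i1. */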
HYPRE_Int
hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int **ext_p, **ext_p_offd;*/
/*HYPRE_Int ccounter_offd;
HYPRE_Int *clist_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
/*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for(i = 0; i < MAX_C_CONNECTIONS; i++)
clist[i] = 0;
if(num_procs > 1)
{
clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS, HYPRE_MEMORY_HOST);
for(i = 0; i < MAX_C_CONNECTIONS; i++)
clist_offd[i] = 0;
}*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
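/* Marker convention used in both passes: P_marker[k] records the
 * position in P at which column k was last inserted. Positions grow
 * monotonically with i, so P_marker[k] < P_diag_i[i] (or, in the
 * second pass, < jj_begin_row) means "k is not yet in row i"; this
 * avoids resetting the marker arrays for every row. */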
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
/*clist[ccounter++] = i1;*/
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < P_offd_i[i])
{
/*clist_offd[ccounter_offd++] = i1;*/
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{ /* i1 is an F point; loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if(hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point; check if it is common */
/*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if(CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
/*break;*/
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
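/* Columns of Sop (and A_ext) use a sign encoding: a global column
 * big_k1 in [col_1, col_n) belongs to the diag part, while any other
 * column has been remapped by the exchange routine to -(offd index)-1,
 * hence the decode loc_col = -big_k1 - 1 used below. */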
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(CF_marker_offd[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
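/* Use a fresh negative marker value for this row's strong F-neighbors
 * so that strong-F tags left over from earlier rows can never be
 * mistaken for the current row's. */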
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*clist[ccounter++] = i1;*/
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*clist_offd[ccounter_offd++] = i1;*/
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if(CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if(hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point; check if it is common */
/*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if(CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(CF_marker_offd[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
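/* Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i. */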
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = (HYPRE_Int)(-big_k1 - 1);
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
/*hypre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFFInterp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
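/* Same common-C test as the routine above: an F-F connection only
 * extends the interpolation set when the two F-points share no
 * strongly influencing C-point, in which case all C-points strongly
 * influencing the neighbor are added. */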
HYPRE_Int
hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
{ /* i1 is an F point; loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if(CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] < 0)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if(CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if(CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
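/* Weight accumulation as in the earlier routines, except that this FF
 * variant distributes a strong F-neighbor's row only onto C-points
 * already in the pattern; there is no i2 == i feedback term into the
 * diagonal here. */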
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFF1Interp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
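/* Variant of the FF routine above: when an F-F pair has no common
 * C-point, only the first eligible C-point of the neighbor is added to
 * the interpolation set (note the found_c / break logic below) rather
 * than all of them. */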
HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/*HYPRE_Int ccounter;*/
HYPRE_Int found_c = 0;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
{ /* i1 is an F point; loop through its strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point; check if it is common */
common_c = 1;
break;
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
found_c = 0;
for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if(CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
found_c = 1;
break;
}
}
}
if(num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] < 0)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if(CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
              { /* k1 is a C point; check if it is common */
common_c = 1;
break;
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
found_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
found_c = 1;
break;
}
}
}
if(num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if(CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
break;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
      { /* Reset strong C neighbors that were temporarily marked 2
         * (for common-C detection) back to 1. */
        i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
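      /* Note: reading A_diag_data[A_diag_i[i]] relies on hypre's CSR
       * convention that the diagonal entry is stored first in each row
       * of the diag part. */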
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
          sum = zero;
          sgn = 1; /* reset per row; mirrors the second pass of
                    * hypre_BoomerAMGBuildExtInterp below */
          if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
          /* Loop over row of A for point i1 and calculate the sum
           * of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = - (HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_BigInt *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_BigInt *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
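   /* strong_f_marker is decremented before each F row is processed, so every
    * row gets a fresh negative tag; stale P_marker entries left over from
    * earlier rows can then never be mistaken for the current row's strong
    * F points. */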
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
HYPRE_BigInt big_k1;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
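   /* Without a global partition, only the last rank holds the upper bound of
    * the coarse-point range, so it broadcasts total_global_cpts to all. */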
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
   /* Set up off-processor information (specifically for neighbors of
    * neighbors) */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
#ifdef HYPRE_PROFILE
      hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixBigJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
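   /* P_diag_i and P_offd_i are CSR row-pointer arrays (hence n_fine+1
    * entries); the first pass below only counts the entries of each row,
    * the second pass fills in column indices and weights. */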
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
      *  strongly influence i, or C-points that strongly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
        { /* i1 is an F point; loop through its strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
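          /* Sop stores columns owned by this rank as global indices in
           * [col_1, col_n); columns owned by other ranks are encoded as
           * negative values, so -(HYPRE_Int)big_k1 - 1 recovers the local
           * index into the offd arrays (this matches the decode below). */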
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* In S_diag */
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/* Fine to coarse mapping */
if(num_procs > 1)
{
hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes, my_first_cpt,
fine_to_coarse_offd);
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
big_k1 = Sop_j[kk];
/* Find local col number */
if(big_k1 >= col_1 && big_k1 < col_n)
{
loc_col = (HYPRE_Int)(big_k1-col_1);
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
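          /* Only connections whose sign opposes the diagonal of row i1 enter
           * the sum (and the distribution below); same-signed entries are
           * skipped, per the (sgn * a) < 0 tests. */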
          /* Loop over row of A for point i1 and calculate the sum
           * of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row )
sum += A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
big_k1 = A_ext_j[jj1];
if(big_k1 >= col_1 && big_k1 < col_n)
{ /* diag */
loc_col = (HYPRE_Int)(big_k1 - col_1);
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -(HYPRE_Int)big_k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
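      /* Finalize the row: each accumulated coupling is scaled by the negative
       * of the (weak-connection-augmented) diagonal, i.e. w_ij = -a_ij / a_ii. */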
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
if(num_functions > 1)
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
|
2.schedule.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#define N 12
/* Q1: Which iterations of the loops are executed by each thread */
/* for each schedule kind? */
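/* A1 (sketch; static schedules are fixed by the OpenMP spec, while dynamic
 * and guided assignments vary from run to run):
 * - schedule(static): N=12 is split into one contiguous block per thread:
 *   thread 0 -> 0..3, thread 1 -> 4..7, thread 2 -> 8..11.
 * - schedule(static,2): chunks of 2 dealt round-robin:
 *   thread 0 -> {0,1,6,7}, thread 1 -> {2,3,8,9}, thread 2 -> {4,5,10,11}.
 * - schedule(dynamic,2): chunks of 2 handed out first-come-first-served,
 *   so the mapping is nondeterministic.
 * - schedule(guided,2): chunk sizes shrink (roughly remaining/threads, never
 *   below 2); which thread gets which chunk is nondeterministic. */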
int main()
{
int i;
omp_set_num_threads(3);
#pragma omp parallel
{
#pragma omp for schedule(static)
for (i=0; i < N; i++) {
int id=omp_get_thread_num();
printf("Loop 1: (%d) gets iteration %d\n",id,i);
}
#pragma omp for schedule(static, 2)
for (i=0; i < N; i++) {
int id=omp_get_thread_num();
printf("Loop 2: (%d) gets iteration %d\n",id,i);
}
#pragma omp for schedule(dynamic,2)
for (i=0; i < N; i++) {
int id=omp_get_thread_num();
printf("Loop 3: (%d) gets iteration %d\n",id,i);
}
#pragma omp for schedule(guided,2)
for (i=0; i < N; i++) {
int id=omp_get_thread_num();
printf("Loop 4: (%d) gets iteration %d\n",id,i);
}
}
return 0;
}
|
PeptideIndexing.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2018.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h>
#include <OpenMS/DATASTRUCTURES/FASTAContainer.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/DATASTRUCTURES/SeqanIncludeWrapper.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/METADATA/PeptideEvidence.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <atomic>
#include <algorithm>
#include <fstream>
namespace OpenMS
{
/**
@brief Refreshes the protein references for all peptide hits in a vector of PeptideIdentifications and adds target/decoy information.
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy". For proteins the possible values are "target" and "decoy",
depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string) as a suffix or prefix, respectively (see parameter @p prefix).
For peptides, the possible values are "target", "decoy" and "target+decoy", depending on whether the peptide sequence is found only in target proteins,
only in decoy proteins, or in both. The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool.
(For FDR calculations, "target+decoy" peptide hits count as target hits.)
@note Make sure that your protein names in the database contain a correctly formatted decoy string. This can be ensured by using @ref UTILS_DecoyDatabase.
       If the decoy identifier is not recognized successfully, all proteins will be assumed to stem from the target part of the query.<br>
       E.g., "sw|P33354_DECOY|YEHR_ECOLI Uncharacterized lipop..." is <b>invalid</b>, since the tool has no knowledge of how SwissProt entries are built up.
A correct identifier could be "DECOY_sw|P33354|YEHR_ECOLI Uncharacterized li ..." or "sw|P33354|YEHR_ECOLI_DECOY Uncharacterized li", depending on whether you are
using prefix or suffix annotation.<br>
Some helpful target/decoy statistics will be reported when done.
By default this tool will fail if an unmatched peptide occurs, i.e. if the database does not contain the corresponding protein.
You can force it to return successfully in this case by using the flag @p allow_unmatched.
Search engines (such as Mascot) will replace ambiguous amino acids ('B', 'J', 'Z' and 'X') in the protein database with unambiguous amino acids in the reported peptides, e.g. exchange 'X' with 'H'.
This will cause such peptides to not be found by exactly matching their sequences to the protein database.
However, we can recover these cases by using tolerant search for ambiguous amino acids in the protein sequence. This is done by default with up to four amino acids
per peptide hit. If you only want exact matches, set @p aaa_max to zero (but expect that unmatched peptides might occur)!
Leucine/Isoleucine:
Further complications can arise due to the presence of the isobaric amino acids isoleucine ('I') and leucine ('L') in protein sequences.
Since the two have the exact same chemical composition and mass, they generally cannot be distinguished by mass spectrometry.
If a peptide containing 'I' was reported as a match for a spectrum, a peptide containing 'L' instead would be an equally good match (and vice versa).
To account for this inherent ambiguity, setting the flag @p IL_equivalent causes 'I' and 'L' to be considered as indistinguishable.@n
For example, if the sequence "PEPTIDE" (matching "Protein1") was identified as a search hit,
but the database additionally contained "PEPTLDE" (matching "Protein2"), running PeptideIndexer with the @p IL_equivalent option would
report both "Protein1" and "Protein2" as accessions for "PEPTIDE".
(This is independent of ambiguous matching via @p aaa_max.)
Additionally, setting this flag will convert all 'J's in any protein sequence to 'I'. This way, no tolerant search is required for 'J' (but is still possible for all
the other ambiguous amino acids).
If @p write_protein_sequences is requested and @p IL_equivalent is set as well, both the I/L-version and unmodified protein sequences need to be stored internally.
This requires some extra memory, roughly equivalent to the size of the FASTA database file itself.
Enzyme specificity:
Once a peptide sequence is found in a protein sequence, this does <b>not</b> imply that the hit is valid! This is where enzyme specificity comes into play.
By default, we demand that the peptide is fully tryptic (i.e. the enzyme parameter is set to "trypsin" and specificity is "full").
So unless the peptide coincides with C- and/or N-terminus of the protein, the peptide's cleavage pattern should fulfill the trypsin cleavage rule [KR][^P].
We make two exceptions to the specificity constraints:
       1) peptides starting at the second or third position of a protein are still considered N-terminally specific,
       since the preceding residues can be cleaved off in vivo; X!Tandem reports these peptides. For example, the two peptides ABAR and LABAR would both match a protein starting with MLABAR.
since the residues can be cleaved off in vivo; X!Tandem reports these peptides. For example, the two peptides ABAR and LABAR would both match a protein starting with MLABAR.
2) adventitious cleavage at Asp|Pro (Aspartate/D | Proline/P) is allowed for all enzymes (as supported by X!Tandem), i.e. counts as a proper cleavage site (see http://www.thegpm.org/tandem/release.html).
You can relax the requirements further by choosing <tt>semi-tryptic</tt> (only one of two "internal" termini must match requirements)
or <tt>none</tt> (essentially allowing all hits, no matter their context). These settings should not be used (due to high risk of reporting false positives),
unless the search engine was instructed to search peptides in the same way.
The FASTA file should not contain duplicate protein accessions (since accessions are not validated) if a correct unique-matching annotation is important (target/decoy annotation is still correct).
Threading:
       This tool supports multiple threads (@p threads option) to speed up computation, at the cost of a little extra memory.
*/
class OPENMS_DLLAPI PeptideIndexing :
public DefaultParamHandler, public ProgressLogger
{
public:
/// Exit codes
enum ExitCodes
{
EXECUTION_OK,
DATABASE_EMPTY,
PEPTIDE_IDS_EMPTY,
ILLEGAL_PARAMETERS,
UNEXPECTED_RESULT,
DECOYSTRING_EMPTY,
};
/// Default constructor
PeptideIndexing();
/// Default destructor
~PeptideIndexing() override;
/// forward for old interface and pyOpenMS; use run<T>() for more control
inline ExitCodes run(std::vector<FASTAFile::FASTAEntry>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
{
FASTAContainer<TFI_Vector> protein_container(proteins);
return run<TFI_Vector>(protein_container, prot_ids, pep_ids);
}
/**
@brief Re-index peptide identifications honoring enzyme cutting rules, ambiguous amino acids and target/decoy hits.
Template parameter 'T' can be either TFI_File or TFI_Vector. If the data is already available, use TFI_Vector and pass the vector.
       If the data is still in a FASTA file and it's not needed afterwards for additional processing, use TFI_File and pass the filename.
PeptideIndexer refreshes target/decoy information and mapping of peptides to proteins.
The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool. (For FDR calculations, "target+decoy" peptide hits count as target hits.)
PeptideIndexer allows for ambiguous amino acids (B|J|Z|X) in the protein database, but not in the peptide sequences.
For the latter only I/L can be treated as equivalent (see 'IL_equivalent' flag), but 'J' is not allowed.
Enzyme cutting rules and partial specificity can be specified.
Resulting protein hits appear in the order of the FASTA file, except for orphaned proteins, which will appear first with an empty target_decoy metavalue.
Duplicate protein accessions & sequences will not raise a warning, but create multiple hits (PeptideIndexer scans over the FASTA file once for efficiency
reasons, and thus might not see all accessions & sequences at once).
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy".
For proteins the possible values are "target" and "decoy", depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string)
as a suffix or prefix, respectively (see parameter @p prefix).
Peptide hits are annotated with metavalue 'protein_references', and if matched to at least one protein also with metavalue 'target_decoy'.
The possible values for 'target_decoy' are "target", "decoy" and "target+decoy",
depending on whether the peptide sequence is found only in target proteins, only in decoy proteins, or in both. The metavalue is not present, if the peptide is unmatched.
       Runtime: PeptideIndexer is usually very fast (loading and storing the data takes the most time) and search speed can be further improved (linearly) by using more threads.
Avoid allowing too many (>=4) ambiguous amino acids if your database contains long stretches of 'X' (exponential search space).
@param proteins A list of proteins -- either read piecewise from a FASTA file or as existing vector of FASTAEntries.
@param prot_ids Resulting protein identifications associated to pep_ids (will be re-written completely)
@param pep_ids Peptide identifications which should be search within @p proteins and then linked to @p prot_ids
@return Exit status codes.
*/
template<typename T>
ExitCodes run(FASTAContainer<T>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
{
// no decoy string provided? try to deduce from data
if (decoy_string_.empty())
{
bool is_decoy_string_auto_successful = findDecoyString_(proteins);
if (!is_decoy_string_auto_successful && contains_decoys_)
{
return DECOYSTRING_EMPTY;
}
else if (!is_decoy_string_auto_successful && !contains_decoys_)
{
OPENMS_LOG_WARN << "Unable to determine decoy string automatically, not enough decoys were detected! Using default " << (prefix_ ? "prefix" : "suffix") << " decoy string '" << decoy_string_ << "\n"
<< "If you think that this is false, please provide a decoy_string and its position manually!" << std::endl;
}
else
{
// decoy string and position was extracted successfully
OPENMS_LOG_INFO << "Using " << (prefix_ ? "prefix" : "suffix") << " decoy string '" << decoy_string_ << "'" << std::endl;
}
proteins.reset();
}
//---------------------------------------------------------------
// parsing parameters, correcting xtandem and MSGFPlus parameters
//---------------------------------------------------------------
ProteaseDigestion enzyme;
enzyme.setEnzyme(enzyme_name_);
enzyme.setSpecificity(enzyme.getSpecificityByName(enzyme_specificity_));
bool xtandem_fix_parameters = true, msgfplus_fix_parameters = true;
// specificity is none or semi? don't automate xtandem
if (enzyme.getSpecificity() == EnzymaticDigestion::SPEC_SEMI ||
enzyme.getSpecificity() == EnzymaticDigestion::SPEC_NONE)
{
xtandem_fix_parameters = false;
}
// enzyme is already Trypsin/P? don't automate MSGFPlus
if (enzyme.getEnzymeName() == "Trypsin/P") { msgfplus_fix_parameters = false; }
// determine if search engine is solely xtandem or MSGFPlus
for (const auto& prot_id : prot_ids)
{
if (!msgfplus_fix_parameters && !xtandem_fix_parameters) { break; }
String se = prot_id.getSearchEngine();
std::string search_engine = StringUtils::toUpper(se);
if (search_engine != "XTANDEM") { xtandem_fix_parameters = false; }
if (search_engine != "MSGFPLUS" || "MS-GF+") { msgfplus_fix_parameters = false; }
}
// solely MSGFPlus -> Trypsin P as enzyme
if (msgfplus_fix_parameters && enzyme.getEnzymeName() == "Trypsin")
{
OPENMS_LOG_WARN << "MSGFPlus detected but enzyme cutting rules were set to Trypsin. Correcting to Trypsin/P to copy with special cutting rule in MSGFPlus." << std::endl;
enzyme.setEnzyme("Trypsin/P");
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
// cache the first proteins
const size_t PROTEIN_CACHE_SIZE = 4e5; // 400k should be enough for most DB's and is not too hard on memory either (~200 MB FASTA)
this->startProgress(0, 1, "Load first chunk");
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
this->endProgress();
if (proteins.empty()) // we do not allow an empty database
{
OPENMS_LOG_ERROR << "Error: An empty database was provided. Mapping makes no sense. Aborting..." << std::endl;
return DATABASE_EMPTY;
}
if (pep_ids.empty()) // Aho-Corasick requires non-empty input; but we allow this case, since the TOPP tool should not crash when encountering a bad raw file (with no PSMs)
{
OPENMS_LOG_WARN << "Warning: An empty set of peptide identifications was provided. Output will be empty as well." << std::endl;
if (!keep_unreferenced_proteins_)
{
// delete only protein hits, not whole ID runs incl. meta data:
for (std::vector<ProteinIdentification>::iterator it = prot_ids.begin();
it != prot_ids.end(); ++it)
{
it->getHits().clear();
}
}
return PEPTIDE_IDS_EMPTY;
}
FoundProteinFunctor func(enzyme, xtandem_fix_parameters); // store the matches
Map<String, Size> acc_to_prot; // map: accessions --> FASTA protein index
std::vector<bool> protein_is_decoy; // protein index -> is decoy?
std::vector<std::string> protein_accessions; // protein index -> accession
bool invalid_protein_sequence = false; // check for proteins with modifications, i.e. '[' or '(', and throw an exception
{ // new scope - forget data after search
/*
BUILD Peptide DB
*/
bool has_illegal_AAs(false);
AhoCorasickAmbiguous::PeptideDB pep_DB;
for (std::vector<PeptideIdentification>::const_iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
{
//String run_id = it1->getIdentifier();
const std::vector<PeptideHit>& hits = it1->getHits();
for (std::vector<PeptideHit>::const_iterator it2 = hits.begin(); it2 != hits.end(); ++it2)
{
//
// Warning:
// do not skip over peptides here, since the results are iterated in the same way
//
String seq = it2->getSequence().toUnmodifiedString().remove('*'); // make a copy, i.e. do NOT change the peptide sequence!
if (seqan::isAmbiguous(seqan::AAString(seq.c_str())))
{ // do not quit here, to show the user all sequences .. only quit after loop
OPENMS_LOG_ERROR << "Peptide sequence '" << it2->getSequence() << "' contains one or more ambiguous amino acids (B|J|Z|X).\n";
has_illegal_AAs = true;
}
if (IL_equivalent_) // convert L to I;
{
seq.substitute('L', 'I');
}
appendValue(pep_DB, seq.c_str());
}
}
if (has_illegal_AAs)
{
OPENMS_LOG_ERROR << "One or more peptides contained illegal amino acids. This is not allowed!"
<< "\nPlease either remove the peptide or replace it with one of the unambiguous ones (while allowing for ambiguous AA's to match the protein)." << std::endl;;
}
OPENMS_LOG_INFO << "Mapping " << length(pep_DB) << " peptides to " << (proteins.size() == PROTEIN_CACHE_SIZE ? "? (unknown number of)" : String(proteins.size())) << " proteins." << std::endl;
if (length(pep_DB) == 0)
{ // Aho-Corasick will crash if given empty needles as input
OPENMS_LOG_WARN << "Warning: Peptide identifications have no hits inside! Output will be empty as well." << std::endl;
return PEPTIDE_IDS_EMPTY;
}
/*
Aho Corasick (fast)
*/
OPENMS_LOG_INFO << "Searching with up to " << aaa_max_ << " ambiguous amino acid(s) and " << mm_max_ << " mismatch(es)!" << std::endl;
SysInfo::MemUsage mu;
OPENMS_LOG_INFO << "Building trie ...";
StopWatch s;
s.start();
AhoCorasickAmbiguous::FuzzyACPattern pattern;
AhoCorasickAmbiguous::initPattern(pep_DB, aaa_max_, mm_max_, pattern);
s.stop();
OPENMS_LOG_INFO << " done (" << int(s.getClockTime()) << "s)" << std::endl;
s.reset();
uint16_t count_j_proteins(0);
bool has_active_data = true; // becomes false if end of FASTA file is reached
const std::string jumpX(aaa_max_ + mm_max_ + 1, 'X'); // jump over stretches of 'X' which cost a lot of time; +1 because AXXA is a valid hit for aaa_max == 2 (cannot split it)
// use very large target value for progress if DB size is unknown (did not fit into first chunk)
this->startProgress(0, proteins.size() == PROTEIN_CACHE_SIZE ? std::numeric_limits<SignedSize>::max() : proteins.size(), "Aho-Corasick");
std::atomic<int> progress_prots(0);
#ifdef _OPENMP
#pragma omp parallel
#endif
{
FoundProteinFunctor func_threads(enzyme, xtandem_fix_parameters);
Map<String, Size> acc_to_prot_thread; // map: accessions --> FASTA protein index
AhoCorasickAmbiguous fuzzyAC;
String prot;
while (true)
{
#pragma omp barrier // all threads need to be here, since we are about to swap protein data
#pragma omp single
{
DEBUG_ONLY std::cerr << " activating cache ...\n";
has_active_data = proteins.activateCache(); // swap in last cache
protein_accessions.resize(proteins.getChunkOffset() + proteins.chunkSize());
} // implicit barrier here
if (!has_active_data) break; // leave while-loop
SignedSize prot_count = (SignedSize)proteins.chunkSize();
#pragma omp master
{
DEBUG_ONLY std::cerr << "Filling Protein Cache ...";
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
protein_is_decoy.resize(proteins.getChunkOffset() + prot_count);
for (SignedSize i = 0; i < prot_count; ++i)
{ // do this in master only, to avoid false sharing
const String& seq = proteins.chunkAt(i).identifier;
protein_is_decoy[i + proteins.getChunkOffset()] = (prefix_ ? seq.hasPrefix(decoy_string_) : seq.hasSuffix(decoy_string_));
}
DEBUG_ONLY std::cerr << " done" << std::endl;
}
DEBUG_ONLY std::cerr << " starting for loop \n";
// search all peptides in each protein
#pragma omp for schedule(dynamic, 100) nowait
for (SignedSize i = 0; i < prot_count; ++i)
{
++progress_prots; // atomic
if (omp_get_thread_num() == 0)
{
this->setProgress(progress_prots);
}
prot = proteins.chunkAt(i).sequence;
prot.remove('*');
// check for invalid sequences with modifications
if (prot.has('[') || prot.has('('))
{
invalid_protein_sequence = true; // not omp-critical because its write-only
// we cannot throw an exception here, since we'd need to catch it within the parallel region
}
// convert L/J to I; also replace 'J' in proteins
if (IL_equivalent_)
{
prot.substitute('L', 'I');
prot.substitute('J', 'I');
}
else
{ // warn if 'J' is found (it eats into aaa_max)
if (prot.has('J'))
{
#pragma omp atomic
++count_j_proteins;
}
}
Size prot_idx = i + proteins.getChunkOffset();
// test if protein was a hit
Size hits_total = func_threads.filter_passed + func_threads.filter_rejected;
// check if there are stretches of 'X'
if (prot.has('X'))
{
// create chunks of the protein (splitting it at stretches of 'X..X') and feed them to AC one by one
size_t offset = -1, start = 0;
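              // offset starts at (size_t)-1 so that 'offset + 1' wraps to 0
              // on the first find()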
while ((offset = prot.find(jumpX, offset + 1)) != std::string::npos)
{
//std::cout << "found X..X at " << offset << " in protein " << proteins[i].identifier << "\n";
addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start, offset + jumpX.size() - start), prot, prot_idx, (int)start, func_threads);
// skip ahead while we encounter more X...
while (offset + jumpX.size() < prot.size() && prot[offset + jumpX.size()] == 'X') ++offset;
start = offset;
//std::cout << " new start: " << start << "\n";
}
// last chunk
if (start < prot.size())
{
addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start), prot, prot_idx, (int)start, func_threads);
}
}
else
{
addHits_(fuzzyAC, pattern, pep_DB, prot, prot, prot_idx, 0, func_threads);
}
// was protein found?
if (hits_total < func_threads.filter_passed + func_threads.filter_rejected)
{
protein_accessions[prot_idx] = proteins.chunkAt(i).identifier;
acc_to_prot_thread[protein_accessions[prot_idx]] = prot_idx;
}
} // end parallel FOR
// join results again
DEBUG_ONLY std::cerr << " critical now \n";
#ifdef _OPENMP
#pragma omp critical(PeptideIndexer_joinAC)
#endif
{
s.start();
// hits
func.merge(func_threads);
// accession -> index
acc_to_prot.insert(acc_to_prot_thread.begin(), acc_to_prot_thread.end());
acc_to_prot_thread.clear();
s.stop();
} // OMP end critical
} // end readChunk
} // OMP end parallel
this->endProgress();
std::cout << "Merge took: " << s.toString() << "\n";
mu.after();
std::cout << mu.delta("Aho-Corasick") << "\n\n";
OPENMS_LOG_INFO << "\nAho-Corasick done:\n found " << func.filter_passed << " hits for " << func.pep_to_prot.size() << " of " << length(pep_DB) << " peptides.\n";
// write some stats
OPENMS_LOG_INFO << "Peptide hits passing enzyme filter: " << func.filter_passed << "\n"
<< " ... rejected by enzyme filter: " << func.filter_rejected << std::endl;
if (count_j_proteins)
{
OPENMS_LOG_WARN << "PeptideIndexer found " << count_j_proteins << " protein sequences in your database containing the amino acid 'J'."
<< "To match 'J' in a protein, an ambiguous amino acid placeholder for I/L will be used.\n"
<< "This costs runtime and eats into the 'aaa_max' limit, leaving less opportunity for B/Z/X matches.\n"
<< "If you want 'J' to be treated as unambiguous, enable '-IL_equivalent'!" << std::endl;
}
} // end local scope
//
// do mapping
//
// index existing proteins
Map<String, Size> runid_to_runidx; // identifier to index
for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
{
runid_to_runidx[prot_ids[run_idx].getIdentifier()] = run_idx;
}
// for peptides --> proteins
Size stats_matched_unique(0);
Size stats_matched_multi(0);
Size stats_unmatched(0); // no match to DB
Size stats_count_m_t(0); // match to Target DB
Size stats_count_m_d(0); // match to Decoy DB
Size stats_count_m_td(0); // match to T+D DB
Map<Size, std::set<Size> > runidx_to_protidx; // in which protID do appear which proteins (according to mapped peptides)
Size pep_idx(0);
for (std::vector<PeptideIdentification>::iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
{
// which ProteinIdentification does the peptide belong to?
Size run_idx = runid_to_runidx[it1->getIdentifier()];
std::vector<PeptideHit>& hits = it1->getHits();
for (std::vector<PeptideHit>::iterator it2 = hits.begin(); it2 != hits.end(); ++it2)
{
// clear protein accessions
it2->setPeptideEvidences(std::vector<PeptideEvidence>());
//
// is this a decoy hit?
//
bool matches_target(false);
bool matches_decoy(false);
std::set<Size> prot_indices; /// protein hits of this peptide
// add new protein references
for (std::set<PeptideProteinMatchInformation>::const_iterator it_i = func.pep_to_prot[pep_idx].begin();
it_i != func.pep_to_prot[pep_idx].end(); ++it_i)
{
prot_indices.insert(it_i->protein_index);
const String& accession = protein_accessions[it_i->protein_index];
PeptideEvidence pe(accession, it_i->position, it_i->position + (int)it2->getSequence().size() - 1, it_i->AABefore, it_i->AAAfter);
it2->addPeptideEvidence(pe);
runidx_to_protidx[run_idx].insert(it_i->protein_index); // fill protein hits
if (protein_is_decoy[it_i->protein_index])
{
matches_decoy = true;
}
else
{
matches_target = true;
}
}
if (matches_decoy && matches_target)
{
it2->setMetaValue("target_decoy", "target+decoy");
++stats_count_m_td;
}
else if (matches_target)
{
it2->setMetaValue("target_decoy", "target");
++stats_count_m_t;
}
else if (matches_decoy)
{
it2->setMetaValue("target_decoy", "decoy");
++stats_count_m_d;
} // else: could match to no protein (i.e. both are false)
//else ... // not required (handled below; see stats_unmatched);
if (prot_indices.size() == 1)
{
it2->setMetaValue("protein_references", "unique");
++stats_matched_unique;
}
else if (prot_indices.size() > 1)
{
it2->setMetaValue("protein_references", "non-unique");
++stats_matched_multi;
}
else
{
it2->setMetaValue("protein_references", "unmatched");
++stats_unmatched;
if (stats_unmatched < 15) OPENMS_LOG_INFO << "Unmatched peptide: " << it2->getSequence() << "\n";
else if (stats_unmatched == 15) OPENMS_LOG_INFO << "Unmatched peptide: ...\n";
}
++pep_idx; // next hit
}
}
Size total_peptides = stats_count_m_t + stats_count_m_d + stats_count_m_td + stats_unmatched;
OPENMS_LOG_INFO << "-----------------------------------\n";
OPENMS_LOG_INFO << "Peptide statistics\n";
OPENMS_LOG_INFO << "\n";
OPENMS_LOG_INFO << " unmatched : " << stats_unmatched << " (" << stats_unmatched * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << " target/decoy:\n";
OPENMS_LOG_INFO << " match to target DB only: " << stats_count_m_t << " (" << stats_count_m_t * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << " match to decoy DB only : " << stats_count_m_d << " (" << stats_count_m_d * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << " match to both : " << stats_count_m_td << " (" << stats_count_m_td * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << "\n";
OPENMS_LOG_INFO << " mapping to proteins:\n";
OPENMS_LOG_INFO << " no match (to 0 protein) : " << stats_unmatched << "\n";
OPENMS_LOG_INFO << " unique match (to 1 protein) : " << stats_matched_unique << "\n";
OPENMS_LOG_INFO << " non-unique match (to >1 protein): " << stats_matched_multi << std::endl;
/// for proteins --> peptides
Size stats_matched_proteins(0), stats_matched_new_proteins(0), stats_orphaned_proteins(0), stats_proteins_target(0), stats_proteins_decoy(0);
// all peptides contain the correct protein hit references, now update the protein hits
for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
{
std::set<Size> masterset = runidx_to_protidx[run_idx]; // all protein matches from above
std::vector<ProteinHit>& phits = prot_ids[run_idx].getHits();
{
// go through existing protein hits and count orphaned proteins (with no peptide hits)
std::vector<ProteinHit> orphaned_hits;
for (std::vector<ProteinHit>::iterator p_hit = phits.begin(); p_hit != phits.end(); ++p_hit)
{
const String& acc = p_hit->getAccession();
if (!acc_to_prot.has(acc)) // acc_to_prot only contains found proteins from current run
{ // old hit is orphaned
++stats_orphaned_proteins;
if (keep_unreferenced_proteins_)
{
p_hit->setMetaValue("target_decoy", "");
orphaned_hits.push_back(*p_hit);
}
}
}
// only keep orphaned hits (if any)
phits = orphaned_hits;
}
// add new protein hits
FASTAFile::FASTAEntry fe;
phits.reserve(phits.size() + masterset.size());
for (std::set<Size>::const_iterator it = masterset.begin(); it != masterset.end(); ++it)
{
ProteinHit hit;
hit.setAccession(protein_accessions[*it]);
if (write_protein_sequence_ || write_protein_description_)
{
proteins.readAt(fe, *it);
if (write_protein_sequence_)
{
hit.setSequence(fe.sequence);
} // no else, since sequence is empty by default
if (write_protein_description_)
{
hit.setDescription(fe.description);
} // no else, since description is empty by default
}
if (protein_is_decoy[*it])
{
hit.setMetaValue("target_decoy", "decoy");
++stats_proteins_decoy;
}
else
{
hit.setMetaValue("target_decoy", "target");
++stats_proteins_target;
}
phits.push_back(hit);
++stats_matched_new_proteins;
}
stats_matched_proteins += phits.size();
}
OPENMS_LOG_INFO << "-----------------------------------\n";
OPENMS_LOG_INFO << "Protein statistics\n";
OPENMS_LOG_INFO << "\n";
OPENMS_LOG_INFO << " total proteins searched: " << proteins.size() << "\n";
OPENMS_LOG_INFO << " matched proteins : " << stats_matched_proteins << " (" << stats_matched_new_proteins << " new)\n";
if (stats_matched_proteins)
{ // prevent Division-by-0 Exception
OPENMS_LOG_INFO << " matched target proteins: " << stats_proteins_target << " (" << stats_proteins_target * 100 / stats_matched_proteins << " %)\n";
OPENMS_LOG_INFO << " matched decoy proteins : " << stats_proteins_decoy << " (" << stats_proteins_decoy * 100 / stats_matched_proteins << " %)\n";
}
OPENMS_LOG_INFO << " orphaned proteins : " << stats_orphaned_proteins << (keep_unreferenced_proteins_ ? " (all kept)" : " (all removed)\n");
OPENMS_LOG_INFO << "-----------------------------------" << std::endl;
/// check for fatal conditions (invalid protein sequences, no decoy matches, unmatched peptides) and set the exit code
bool has_error = false;
if (invalid_protein_sequence)
{
OPENMS_LOG_ERROR << "Error: One or more protein sequences contained the characters '[' or '(', which are illegal in protein sequences."
<< "\nPeptide hits might be masked by these characters (which usually indicate presence of modifications).\n";
has_error = true;
}
if ((stats_count_m_d + stats_count_m_td) == 0)
{
String msg("No peptides were matched to the decoy portion of the database! Did you provide the correct concatenated database? Are your 'decoy_string' (=" + String(decoy_string_) + ") and 'decoy_string_position' (=" + String(param_.getValue("decoy_string_position")) + ") settings correct?");
if (missing_decoy_action_ == "error")
{
OPENMS_LOG_ERROR << "Error: " << msg << "\nSet 'missing_decoy_action' to 'warn' if you are sure this is ok!\nAborting ..." << std::endl;
has_error = true;
}
else if (missing_decoy_action_ == "warn")
{
OPENMS_LOG_WARN << "Warn: " << msg << "\nSet 'missing_decoy_action' to 'error' if you want to elevate this to an error!" << std::endl;
}
else // silent
{
}
}
if ((!allow_unmatched_) && (stats_unmatched > 0))
{
OPENMS_LOG_ERROR << "PeptideIndexer found unmatched peptides, which could not be associated to a protein.\n"
<< "Potential solutions:\n"
<< " - check your FASTA database for completeness\n"
<< " - set 'enzyme:specificity' to match the identification parameters of the search engine\n"
<< " - some engines (e.g. X! Tandem) employ loose cutting rules generating non-tryptic peptides;\n"
<< " if you trust them, disable enzyme specificity\n"
<< " - increase 'aaa_max' to allow more ambiguous amino acids\n"
<< " - as a last resort: use the 'allow_unmatched' option to accept unmatched peptides\n"
<< " (note that unmatched peptides cannot be used for FDR calculation or quantification)\n";
has_error = true;
}
if (has_error)
{
OPENMS_LOG_ERROR << "Result files will be written, but PeptideIndexer will exit with an error code." << std::endl;
return UNEXPECTED_RESULT;
}
return EXECUTION_OK;
}
const String& getDecoyString() const;
bool isPrefix() const;
protected:
using DecoyStringToAffixCount = std::map<std::string, std::pair<int, int>>;
using CaseInsensitiveToCaseSensitiveDecoy = std::map<std::string, std::string>;
bool contains_decoys_;
template<typename T>
bool findDecoyString_(FASTAContainer<T>& proteins)
{
// common decoy strings in FASTA files
// note: decoy prefixes/suffixes must be provided in lower case
std::vector<std::string> affixes = {"decoy", "dec", "reverse", "rev", "__id_decoy", "xxx", "shuffled", "shuffle", "pseudo", "random"};
// map decoys to counts of occurrences as prefix/suffix
DecoyStringToAffixCount decoy_count;
// map case insensitive strings back to original case (as used in fasta)
CaseInsensitiveToCaseSensitiveDecoy decoy_case_sensitive;
// assume that it contains decoys for now
contains_decoys_ = true;
// setup prefix- and suffix regex strings
const std::string regexstr_prefix = std::string("^(") + ListUtils::concatenate<std::string>(affixes, "_*|") + "_*)";
const std::string regexstr_suffix = std::string("(") + ListUtils::concatenate<std::string>(affixes, "_*|") + "_*)$";
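// e.g. for affixes {"decoy", "rev"} this yields the prefix regex "^(decoy_*|rev_*)"
// and the suffix regex "(decoy_*|rev_*)$", i.e. each affix may be followed by any
// number of underscores ("DECOY_", "rev", "decoy__", ... all match after lowercasing)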
// setup regexes
const boost::regex pattern_prefix(regexstr_prefix);
const boost::regex pattern_suffix(regexstr_suffix);
int all_prefix_occur(0), all_suffix_occur(0), all_proteins_count(0);
const size_t PROTEIN_CACHE_SIZE = 4e5;
while (true)
{
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
if (!proteins.activateCache()) break;
auto prot_count = (SignedSize) proteins.chunkSize();
all_proteins_count += prot_count;
{
for (SignedSize i = 0; i < prot_count; ++i)
{
String seq = proteins.chunkAt(i).identifier;
String seq_lower = seq;
seq_lower.toLower();
boost::smatch sm;
// search for prefix
bool found_prefix = boost::regex_search(seq_lower, sm, pattern_prefix);
if (found_prefix)
{
std::string match = sm[0];
all_prefix_occur++;
// increase count of observed prefix
decoy_count[match].first++;
// store observed (case sensitive and with special characters)
std::string seq_decoy = StringUtils::prefix(seq, match.length());
decoy_case_sensitive[match] = seq_decoy;
}
// search for suffix
bool found_suffix = boost::regex_search(seq_lower, sm, pattern_suffix);
if (found_suffix)
{
std::string match = sm[0];
all_suffix_occur++;
// increase count of observed suffix
decoy_count[match].second++;
// store observed (case sensitive and with special characters)
std::string seq_decoy = StringUtils::suffix(seq, match.length());
decoy_case_sensitive[match] = seq_decoy;
}
}
}
}
// DEBUG ONLY: print counts of found decoys
for (auto &a : decoy_count) OPENMS_LOG_DEBUG << a.first << "\t" << a.second.first << "\t" << a.second.second << std::endl;
// if fewer than 40% of proteins carry a recognized decoy prefix/suffix, we cannot
// reliably determine the decoy string and its position -> fall back to default values
if (all_prefix_occur + all_suffix_occur < 0.4 * all_proteins_count) {
decoy_string_ = "DECOY_";
prefix_ = true;
contains_decoys_ = false;
return false;
}
if (all_prefix_occur == all_suffix_occur)
{
OPENMS_LOG_ERROR << "Unable to determine decoy string!" << std::endl;
return false;
}
// a decoy prefix qualifies if it accounts for at least 80% of all observed prefixes and occurs in at least 40% of all proteins -> use it as the prefix decoy string
for (const auto& pair : decoy_count)
{
const std::string & case_insensitive_decoy_string = pair.first;
const std::pair<int, int>& prefix_suffix_counts = pair.second;
double freq_prefix = static_cast<double>(prefix_suffix_counts.first) / static_cast<double>(all_prefix_occur);
double freq_prefix_in_proteins = static_cast<double>(prefix_suffix_counts.first) / static_cast<double>(all_proteins_count);
if (freq_prefix >= 0.8 && freq_prefix_in_proteins >= 0.4)
{
prefix_ = true;
decoy_string_ = decoy_case_sensitive[case_insensitive_decoy_string];
if (prefix_suffix_counts.first != all_prefix_occur)
{
OPENMS_LOG_WARN << "More than one decoy prefix observed!" << std::endl;
OPENMS_LOG_WARN << "Using most frequent decoy prefix (" << (int) (freq_prefix * 100) <<"%)" << std::endl;
}
return true;
}
}
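// worked example (illustrative numbers): with 1000 proteins, 500 "rev_" prefixes and
// 10 "xxx" prefixes, freq_prefix("rev_") = 500/510 ~ 0.98 >= 0.8 and
// freq_prefix_in_proteins = 500/1000 = 0.5 >= 0.4, so "rev_" is accepted (with a
// warning below, since it does not account for all observed prefixes)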
// a decoy suffix qualifies if it accounts for at least 80% of all observed suffixes and occurs in at least 40% of all proteins -> use it as the suffix decoy string
for (const auto& pair : decoy_count)
{
const std::string& case_insensitive_decoy_string = pair.first;
const std::pair<int, int>& prefix_suffix_counts = pair.second;
double freq_suffix = static_cast<double>(prefix_suffix_counts.second) / static_cast<double>(all_suffix_occur);
double freq_suffix_in_proteins = static_cast<double>(prefix_suffix_counts.second) / static_cast<double>(all_proteins_count);
if (freq_suffix >= 0.8 && freq_suffix_in_proteins >= 0.4)
{
prefix_ = false;
decoy_string_ = decoy_case_sensitive[case_insensitive_decoy_string];
if (prefix_suffix_counts.second != all_suffix_occur)
{
OPENMS_LOG_WARN << "More than one decoy suffix observed!" << std::endl;
OPENMS_LOG_WARN << "Using most frequent decoy suffix (" << (int) (freq_suffix * 100) <<"%)" << std::endl;
}
return true;
}
}
OPENMS_LOG_ERROR << "Unable to determine decoy string and its position. Please provide a decoy string and its position as parameters." << std::endl;
return false;
}
struct PeptideProteinMatchInformation
{
/// index of the protein the peptide is contained in
OpenMS::Size protein_index;
/// the position of the peptide in the protein
OpenMS::Int position;
/// the amino acid before the peptide in the protein
char AABefore;
/// the amino acid after the peptide in the protein
char AAAfter;
bool operator<(const PeptideProteinMatchInformation& other) const
{
if (protein_index != other.protein_index)
{
return protein_index < other.protein_index;
}
else if (position != other.position)
{
return position < other.position;
}
else if (AABefore != other.AABefore)
{
return AABefore < other.AABefore;
}
else if (AAAfter != other.AAAfter)
{
return AAAfter < other.AAAfter;
}
return false;
}
bool operator==(const PeptideProteinMatchInformation& other) const
{
return protein_index == other.protein_index &&
position == other.position &&
AABefore == other.AABefore &&
AAAfter == other.AAAfter;
}
};
struct FoundProteinFunctor
{
public:
typedef std::map<OpenMS::Size, std::set<PeptideProteinMatchInformation> > MapType;
/// peptide index --> protein indices
MapType pep_to_prot;
/// number of accepted hits (passing addHit() constraints)
OpenMS::Size filter_passed;
/// number of rejected hits (not passing addHit())
OpenMS::Size filter_rejected;
private:
ProteaseDigestion enzyme_;
/// are we checking xtandem cleavage rules?
bool xtandem_;
public:
explicit FoundProteinFunctor(const ProteaseDigestion& enzyme, bool xtandem) :
pep_to_prot(), filter_passed(0), filter_rejected(0), enzyme_(enzyme), xtandem_(xtandem)
{
}
void merge(FoundProteinFunctor& other)
{
if (pep_to_prot.empty())
{ // first merge is easy
pep_to_prot.swap(other.pep_to_prot);
}
else
{
for (FoundProteinFunctor::MapType::const_iterator it = other.pep_to_prot.begin(); it != other.pep_to_prot.end(); ++it)
{ // augment set
this->pep_to_prot[it->first].insert(it->second.begin(), it->second.end());
}
other.pep_to_prot.clear();
}
// cheap members
this->filter_passed += other.filter_passed;
other.filter_passed = 0;
this->filter_rejected += other.filter_rejected;
other.filter_rejected = 0;
}
void addHit(const OpenMS::Size idx_pep,
const OpenMS::Size idx_prot,
const OpenMS::Size len_pep,
const OpenMS::String& seq_prot,
OpenMS::Int position)
{
if (enzyme_.isValidProduct(seq_prot, position, len_pep, true, true, xtandem_))
{
PeptideProteinMatchInformation match;
match.protein_index = idx_prot;
match.position = position;
match.AABefore = (position == 0) ? PeptideEvidence::N_TERMINAL_AA : seq_prot[position - 1];
match.AAAfter = (position + len_pep >= seq_prot.size()) ? PeptideEvidence::C_TERMINAL_AA : seq_prot[position + len_pep];
pep_to_prot[idx_pep].insert(match);
++filter_passed;
}
else
{
//std::cerr << "REJECTED Peptide " << seq_pep << " with hit to protein "
// << seq_prot << " at position " << position << std::endl;
++filter_rejected;
}
}
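// boundary handling in addHit above: a peptide matched at position 0 gets
// AABefore = PeptideEvidence::N_TERMINAL_AA, and a match that ends at the last
// residue of the protein gets AAAfter = PeptideEvidence::C_TERMINAL_AA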
};
inline void addHits_(AhoCorasickAmbiguous& fuzzyAC, const AhoCorasickAmbiguous::FuzzyACPattern& pattern, const AhoCorasickAmbiguous::PeptideDB& pep_DB, const String& prot, const String& full_prot, SignedSize idx_prot, Int offset, FoundProteinFunctor& func_threads) const
{
fuzzyAC.setProtein(prot);
while (fuzzyAC.findNext(pattern))
{
const seqan::Peptide& tmp_pep = pep_DB[fuzzyAC.getHitDBIndex()];
func_threads.addHit(fuzzyAC.getHitDBIndex(), idx_prot, length(tmp_pep), full_prot, fuzzyAC.getHitProteinPosition() + offset);
}
}
void updateMembers_() override;
String decoy_string_;
bool prefix_;
String missing_decoy_action_;
String enzyme_name_;
String enzyme_specificity_;
bool write_protein_sequence_;
bool write_protein_description_;
bool keep_unreferenced_proteins_;
bool allow_unmatched_;
bool IL_equivalent_;
Int aaa_max_;
Int mm_max_;
};
}
|
bug-nested.c | // RUN: %libomp-compile && env KMP_AFFINITY=compact %libomp-run
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include "omp_testsuite.h"
// regression test: always returns success; it passes as long as the nested
// parallel region with proc_bind(close) under KMP_AFFINITY=compact completes
// without crashing or hanging
int test_nested_affinity_bug() {
int a = 0;
omp_set_nested(1);
#pragma omp parallel num_threads(2) shared(a)
{
#pragma omp parallel num_threads(2) shared(a) proc_bind(close)
{
#pragma omp atomic
a++;
}
}
return 1;
}
int main() {
int i;
int num_failed = 0;
for (i = 0; i < REPETITIONS; i++) {
if (!test_nested_affinity_bug()) {
num_failed++;
}
}
return num_failed;
}
|
matmul.h | /* BSD 3-Clause License
* Copyright (c) 2019-2021, contributors
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef MATMUL_H
#define MATMUL_H
#include "xitao.h"
#ifdef SINGLE
typedef float real_t;
#else
typedef double real_t;
#endif
typedef uint32_t len_t;
len_t leaf = 32;
len_t N = 256;
int use_omp = 0;
int use_sta = 0;
uint32_t use_workload_hint = 0;
template<class arr>
void print_matrix(arr mat, len_t n) {
cout << "**********************" << endl;
for(len_t i = 0; i < n; ++i) {
for(len_t j = 0; j < n; ++j) {
cout << mat[i * n + j] << ",";
}
cout << endl;
}
cout << "**********************" << endl; // newline added so consecutive prints do not run together
}
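// note: the i-k-j loop order below streams the rows of b and c contiguously,
// which is cache-friendlier for row-major storage than the textbook i-j-k order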
void matmul_serial(real_t* a, real_t* b, real_t* c, len_t block, len_t dim) {
for(len_t i = 0; i < block; ++i) {
for(len_t k = 0; k < block; ++k) {
for(len_t j = 0; j < block; ++j) {
c[i * dim + j] += a[i * dim + k] * b[k * dim + j];
}
}
}
}
void matmul_omp_parfor(real_t* a, real_t* b, real_t* c, len_t block, len_t dim) {
#pragma omp parallel for
for(len_t i = 0; i < block; ++i) {
for(len_t k = 0; k < block; ++k) {
for(len_t j = 0; j < block; ++j) {
c[i * dim + j] += a[i * dim + k] * b[k * dim + j];
}
}
}
}
// the Merge MatMul TAO (Every TAO class must inherit from AssemblyTask)
class MatMulTAO : public AssemblyTask {
public:
real_t* a;
real_t* b;
real_t* c;
real_t** deps;
len_t n;
atomic<len_t> next;
// the tao construction. resource hint 1
MatMulTAO(real_t* a, real_t* b, real_t* c, real_t** deps, len_t n):
AssemblyTask(1), a(a), b(b), c(c), deps(deps), n(n) { // base class listed first to match actual initialization order
if(use_workload_hint != 0) workload_hint = n;
for(len_t i = 0; i < n * n; ++i) c[i] = 0; // zero the full n*n output block; the leaf case accumulates into c with +=
next = 0;
}
// the work function
void execute(int nthread) {
if(n <= leaf) {
len_t i = next++;
while(i < n) {
for(len_t k = 0; k < n; ++k) {
for(len_t j = 0; j < n; ++j) {
c[i * n + j] += a[i * N + k] * b[k * N + j];
}
}
i = next++;
}
} else {
len_t block = n / 2;
len_t i = next++;
auto& c1 = deps[0];
auto& c2 = deps[1];
auto& c3 = deps[2];
auto& c4 = deps[3];
auto& c5 = deps[4];
auto& c6 = deps[5];
auto& c7 = deps[6];
auto& c8 = deps[7];
while(i < block) {
for(int j = 0; j < block; ++j) {
c[i * n + j] = c1[i * block + j] + c2[i * block + j];
}
for(int j = 0; j < block; ++j) {
c[i * n + j + block] = c3[i * block + j] + c4[i * block + j];
}
for(int j = 0; j < block; ++j) {
c[(i + block) * n + j]= c5[i * block + j] + c6[i * block + j];
}
for(int j = 0; j < block; ++j) {
c[(i + block) * n + j + block] = c7[i * block + j] + c8[i * block + j];
}
i = next++;
}
}
}
void cleanup() {
if(deps != NULL) {
for(int i = 0; i < 8; ++i) delete[] deps[i];
delete[] deps;
}
}
};
void divide(real_t* a, real_t* b, real_t* c, len_t n, len_t offset_x = 0, len_t offset_y = 0, MatMulTAO* parent = NULL) {
MatMulTAO* matmul_tao;
float sta = float(offset_x * N + offset_y) / (N * N);
assert(sta < 1.0f);
if(n <= leaf) {
matmul_tao = new MatMulTAO(a, b, c, NULL, n);
matmul_tao->set_sta(sta);
xitao_push(matmul_tao);
} else {
real_t** sub_c = new real_t*[8];
real_t* a11 = a;
real_t* a12 = a + n / 2;
real_t* a21 = a + N * n / 2;
real_t* a22 = a + N * n / 2 + n / 2;
real_t* b11 = b;
real_t* b12 = b + n / 2;
real_t* b21 = b + N * n / 2;
real_t* b22 = b + N * n / 2 + n / 2;
sub_c[0] = new real_t[n / 2 * n / 2];
sub_c[1] = new real_t[n / 2 * n / 2];
sub_c[2] = new real_t[n / 2 * n / 2];
sub_c[3] = new real_t[n / 2 * n / 2];
sub_c[4] = new real_t[n / 2 * n / 2];
sub_c[5] = new real_t[n / 2 * n / 2];
sub_c[6] = new real_t[n / 2 * n / 2];
sub_c[7] = new real_t[n / 2 * n / 2];
matmul_tao = new MatMulTAO(a, b, c, sub_c, n);
matmul_tao->set_sta(sta);
divide(a11, b11, sub_c[0], n / 2, offset_x, offset_y, matmul_tao);
divide(a12, b21, sub_c[1], n / 2, offset_x, offset_y, matmul_tao);
divide(a11, b12, sub_c[2], n / 2, offset_x, offset_y + n / 2, matmul_tao);
divide(a12, b22, sub_c[3], n / 2, offset_x, offset_y + n / 2, matmul_tao);
divide(a21, b11, sub_c[4], n / 2, offset_x + n / 2, offset_y, matmul_tao);
divide(a22, b21, sub_c[5], n / 2, offset_x + n / 2, offset_y, matmul_tao);
divide(a21, b12, sub_c[6], n / 2, offset_x + n / 2, offset_y + n / 2, matmul_tao);
divide(a22, b22, sub_c[7], n / 2, offset_x + n / 2, offset_y + n / 2, matmul_tao);
}
if(parent) matmul_tao->make_edge(parent);
}
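// usage sketch (illustrative; assumes the usual XiTAO driver calls such as
// gotao_init()/gotao_start()/gotao_fini(), which are not shown in this header):
//   divide(A, B, C, N);  // builds the TAO DAG: leaves compute block products,
//                        // each parent TAO merges its children's c1..c8 quadrants
// then run the runtime and wait for completion before reading C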
#endif
|
csr_mult_double.h | // Copyright 2021 Igor Rukhovich
#ifndef MODULES_TASK_2_RUKHOVICH_I_CSR_MULT_DOUBLE_CSR_MULT_DOUBLE_H_
#define MODULES_TASK_2_RUKHOVICH_I_CSR_MULT_DOUBLE_CSR_MULT_DOUBLE_H_
#include <omp.h>
#include <random>
#include <stdexcept>
#include <utility>
#include <vector>
#include <cstdint> // uint16_t (default UIntType below)
#include <type_traits> // std::is_same used in Compare()
class RandomDouble {
public:
static double Next() {
static RandomDouble rand = RandomDouble();
return rand.dist_(rand.gen_);
}
private:
RandomDouble() : gen_(std::random_device()()), dist_(-1e3, 1e3) {
}
std::mt19937_64 gen_;
std::uniform_real_distribution<double> dist_;
};
template <class T>
bool Compare(const T &lhs, const T &rhs) {
static float float_eps = 1e-3;
static double double_eps = 1e-6;
if (std::is_same<T, float>::value) {
return (lhs + float_eps > rhs) && (lhs - float_eps < rhs);
}
if (std::is_same<T, double>::value) {
return (lhs + double_eps > rhs) && (lhs - double_eps < rhs);
}
return lhs == rhs;
}
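// e.g. Compare(0.1 + 1e-7, 0.1) is true for double (double_eps = 1e-6), while
// Compare(2, 3) for int falls through to the exact == comparison and is false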
template <class ValueType, typename UIntType = uint16_t>
class CSRMatrixOMP {
public:
CSRMatrixOMP() = delete;
CSRMatrixOMP(const CSRMatrixOMP &other) = default;
CSRMatrixOMP(CSRMatrixOMP &&other) = default;
explicit CSRMatrixOMP(UIntType height, UIntType width)
: num_cols_(width), counts_(height + 1, 0) {
}
explicit CSRMatrixOMP(UIntType height, UIntType width, const std::vector<ValueType> &matrix)
: num_cols_(width), counts_(height + 1) {
if (matrix.size() != static_cast<size_t>(height) * width) { // compare in size_t to avoid truncation/overflow for small UIntType
throw std::runtime_error("Init matrix must consist of height*width elements");
}
counts_[0] = 0;
for (UIntType row = 0; row < height; ++row) {
counts_[row + 1] = counts_[row];
for (UIntType col = 0; col < width; ++col) {
auto cur_val = matrix[width * row + col];
if (cur_val) {
vals_.emplace_back(cur_val);
cols_.emplace_back(col);
++counts_[row + 1];
}
}
}
}
CSRMatrixOMP &operator=(CSRMatrixOMP other) {
Swap(other);
return *this;
}
void Swap(CSRMatrixOMP &other) {
std::swap(vals_, other.vals_);
std::swap(cols_, other.cols_);
std::swap(counts_, other.counts_);
std::swap(num_cols_, other.num_cols_);
}
bool operator==(const CSRMatrixOMP &rhs) const {
const CSRMatrixOMP &lhs = *this;
if (lhs.vals_.size() != rhs.vals_.size() || lhs.counts_.size() != rhs.counts_.size() ||
lhs.num_cols_ != rhs.num_cols_) {
return false;
}
for (UIntType i = 1; i < lhs.counts_.size(); ++i) {
if (lhs.counts_[i] != rhs.counts_[i]) {
return false;
}
}
for (UIntType i = 0; i < lhs.vals_.size(); ++i) {
if (lhs.cols_[i] != rhs.cols_[i] || !Compare(lhs.vals_[i], rhs.vals_[i])) {
return false;
}
}
return true;
}
CSRMatrixOMP &operator*=(const CSRMatrixOMP &other) {
CSRMatrixOMP rhs = other.GetTransposed();
UIntType res_width = other.num_cols_;
UIntType res_height = counts_.size() - 1;
std::vector<ValueType> res_mat(res_width * res_height);
#pragma omp parallel for schedule(dynamic)
for (int32_t row = 0; row < static_cast<int32_t>(res_height); ++row) {
for (UIntType col = 0; col < res_width; ++col) {
UIntType l_row_cur = counts_[row], r_row_cur = rhs.counts_[col];
ValueType cur_val = 0;
while (l_row_cur < counts_[row + 1] && r_row_cur < rhs.counts_[col + 1]) {
if (cols_[l_row_cur] < rhs.cols_[r_row_cur]) {
++l_row_cur;
} else if (cols_[l_row_cur] > rhs.cols_[r_row_cur]) {
++r_row_cur;
} else {
cur_val += vals_[l_row_cur++] * rhs.vals_[r_row_cur++];
}
}
res_mat[row * res_width + col] = cur_val;
}
}
num_cols_ = res_width;
vals_.resize(0);
cols_.resize(0);
counts_.resize(res_height + 1);
counts_[0] = 0;
for (UIntType row = 0; row < res_height; ++row) {
counts_[row + 1] = counts_[row];
for (UIntType col = 0; col < res_width; ++col) {
auto cur_val = res_mat[res_width * row + col];
if (cur_val) {
vals_.emplace_back(cur_val);
cols_.emplace_back(col);
++counts_[row + 1];
}
}
}
return *this;
}
protected:
// We can think of it as converting CSR to CSC and back again
const CSRMatrixOMP GetTransposed() const {
CSRMatrixOMP other(num_cols_, counts_.size() - 1);
// phase 1: histogram -- count the entries in each column
for (UIntType i = 0; i < cols_.size(); ++i) {
++other.counts_[cols_[i]];
}
// phase 2: exclusive prefix sum turns the histogram into column start offsets
for (UIntType col = 0, sum = 0; col < other.counts_.size(); ++col) {
UIntType tmp = other.counts_[col];
other.counts_[col] = sum;
sum += tmp;
}
other.cols_.resize(other.counts_.back());
other.vals_.resize(other.counts_.back());
// phase 3: scatter each entry into its column bucket; counts_[col] advances as an insertion cursor
for (UIntType row = 0; static_cast<UIntType>(row + 1) < counts_.size(); ++row) {
for (UIntType cnt = counts_[row]; cnt < counts_[row + 1]; ++cnt) {
UIntType col = cols_[cnt];
UIntType dest_place = other.counts_[col];
other.vals_[dest_place] = vals_[cnt];
other.cols_[dest_place] = row;
++other.counts_[col];
}
}
// phase 4: the cursors now hold end offsets; shift right by one to restore CSR row pointers
for (UIntType col = 0, last = 0; col <= num_cols_; ++col) {
UIntType tmp = other.counts_[col];
other.counts_[col] = last;
last = tmp;
}
return other;
}
UIntType num_cols_;
std::vector<ValueType> vals_;
std::vector<UIntType> cols_;
std::vector<UIntType> counts_;
};
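// minimal usage sketch (illustrative values):
//   std::vector<double> a = {1, 0, 0, 2}, b = {0, 3, 4, 0};
//   CSRMatrixOMP<double> lhs(2, 2, a), rhs(2, 2, b);
//   lhs *= rhs;  // lhs now holds [[0, 3], [8, 0]] in CSR form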
#endif // MODULES_TASK_2_RUKHOVICH_I_CSR_MULT_DOUBLE_CSR_MULT_DOUBLE_H_
|
tree_stats.h | /**
* This class lets us write methods to gather tree structure statistics once,
* and apply them to many data structures.
*/
#ifndef TREE_STATS_H
#define TREE_STATS_H
#ifdef USE_TREE_STATS
#include <cassert>
#include <sstream>
#include <string>
#include <vector>
#include <limits>
#include "plaf.h"
#define MAX_HEIGHT (1<<10)
/**
* TODO: extend tree_stats.h to start tracking memory layout issues
* (avg cache line crossings,
* avg cache set occupancy (need to demarcate search data),
* neighbouring object types,
* page crossings,
* avg page density,
* alignment histogram,
* page occupancy visualizations,
* unique pages needed,
* unique cache lines needed)
*/
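/**
 * Sketch of the interface TreeStats expects from NodeHandlerT (derived from the
 * calls made below; any concrete handler type is hypothetical):
 *
 *   struct MyNodeHandler {
 *     typedef MyNode * NodePtrType;
 *     bool isLeaf(NodePtrType node);
 *     size_t getNumKeys(NodePtrType node);
 *     size_t getSumOfKeys(NodePtrType node);
 *     size_t getSizeInBytes(NodePtrType node); // only with TREE_STATS_BYTES_AT_DEPTH
 *     ChildIterator getChildIterator(NodePtrType node); // exposes hasNext()/next()
 *   };
 */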
template <typename NodeHandlerT>
class TreeStats {
private:
typedef typename NodeHandlerT::NodePtrType nodeptr;
PAD;
size_t internalsAtDepth[MAX_HEIGHT];
size_t leavesAtDepth[MAX_HEIGHT];
size_t keysAtDepth[MAX_HEIGHT];
size_t keysInLeavesAtDepth[MAX_HEIGHT];
size_t keysInInternalsAtDepth[MAX_HEIGHT];
size_t sumOfKeys;
#ifdef TREE_STATS_BYTES_AT_DEPTH
size_t bytesAtDepth[MAX_HEIGHT];
#endif
PAD;
void computeStats(NodeHandlerT * handler, nodeptr node, size_t depth, size_t maxDepth = std::numeric_limits<size_t>::max()) {
if (!handler) return;
//std::cout<<"nodeAddr="<<(size_t)node<<" depth="<<depth<<" degree="<<node->size<<" internal?="<<NodeHandlerT::isInternal(node)<<std::endl;
if (!node || depth > maxDepth) return;
size_t numKeys = handler->getNumKeys(node);
keysAtDepth[depth] += numKeys;
sumOfKeys += handler->getSumOfKeys(node);
#ifdef TREE_STATS_BYTES_AT_DEPTH
bytesAtDepth[depth] += handler->getSizeInBytes(node);
#endif
if (handler->isLeaf(node)) {
++leavesAtDepth[depth];
keysInLeavesAtDepth[depth] += numKeys;
} else {
++internalsAtDepth[depth];
keysInInternalsAtDepth[depth] += numKeys;
auto it = handler->getChildIterator(node);
while (it.hasNext()) {
auto child = it.next();
computeStats(handler, child, 1+depth, maxDepth);
}
}
}
public:
TreeStats(NodeHandlerT * handler, nodeptr root, bool parallelConstruction, bool freeHandler = true) {
for (size_t d=0;d<MAX_HEIGHT;++d) {
internalsAtDepth[d] = 0;
leavesAtDepth[d] = 0;
keysAtDepth[d] = 0;
keysInLeavesAtDepth[d] = 0;
keysInInternalsAtDepth[d] = 0;
#ifdef TREE_STATS_BYTES_AT_DEPTH
bytesAtDepth[d] = 0;
#endif
}
sumOfKeys = 0;
#ifdef _OPENMP
if (!handler) return;
if (!parallelConstruction) {
computeStats(handler, root, 0);
} else {
/**
* PARALLEL constructor
*/
std::cout<<"computing tree_stats in PARALLEL..."<<std::endl;
// the enclosing block is already guarded by #ifdef _OPENMP, so no inner guard is needed
const size_t minNodes = 4*omp_get_max_threads();
std::vector<nodeptr> qn; // queue of node pointers
std::vector<size_t> qd; // queue of depths
qn.reserve(minNodes*2);
qd.reserve(minNodes*2);
qn.push_back(root);
qd.push_back(0);
size_t ix = 0; // index of top in qn and qd
size_t currDepth = 0;
size_t ixStartOfDepth = 0;
size_t nodesSeenAtDepth = 0;
const size_t ompThreads = omp_get_max_threads();
std::cout<<"bounded depth BFS to partition into subtrees for parallel computation ("<<ompThreads<<" threads)..."<<std::endl;
while (ix < qn.size()) {
auto node = qn[ix];
auto depth = qd[ix];
TRACE COUTATOMIC("tree_stats: queue visiting node "<<(uintptr_t) node<<" at depth "<<depth<<std::endl);
++ix;
TRACE COUTATOMIC("tree_stats: new ix="<<ix<<std::endl);
if (depth != currDepth) {
if (nodesSeenAtDepth >= minNodes) {
TRACE COUTATOMIC("tree_stats: we have seen enough nodes ("<<nodesSeenAtDepth<<") at depth "<<currDepth<<" so we cut off the bfs"<<std::endl);
--ix;
TRACE COUTATOMIC("tree_stats: new new ix="<<ix<<std::endl);
break;
}
currDepth = depth;
nodesSeenAtDepth = 0;
ixStartOfDepth = ix-1;
TRACE COUTATOMIC("tree_stats: new depth "<<depth<<" ixStartOfDepth="<<ixStartOfDepth<<std::endl);
}
++nodesSeenAtDepth;
TRACE COUTATOMIC("tree_stats: nodesSeenAtDepth "<<currDepth<<" changed to "<<nodesSeenAtDepth<<std::endl);
// add any children to the queue
if (node && !handler->isLeaf(node)) {
auto it = handler->getChildIterator(node);
while (it.hasNext()) {
auto child = it.next();
TRACE COUTATOMIC("tree_stats: add child "<<(uintptr_t) child<<" of node "<<(uintptr_t) node<<" to queue at depth "<<(1+depth)<<std::endl);
qn.push_back(child);
qd.push_back(1+depth);
}
}
}
if (nodesSeenAtDepth < minNodes) {
currDepth = 0;
ix = 1;
ixStartOfDepth = 0;
TRACE COUTATOMIC("tree_stats: not enough nodes seen ("<<nodesSeenAtDepth<<") at any depth just partition into ONE tree"<<std::endl);
}
// we now have at least minNodes subtrees to process (in qn[ixStartOfDepth ... ix]),
// so we can use openmp parallel for to construct TreeStats for these subtrees in parallel.
std::cout<<"partitioned into "<<(ix-ixStartOfDepth)<<" subtrees; running parallel for..."<<std::endl;
#pragma omp parallel for schedule(dynamic, 1)
for (size_t i=ixStartOfDepth;i<ix;++i) {
TreeStats<NodeHandlerT> * ts = new TreeStats(handler, qn[i], false, false);
TRACE COUTATOMIC("tree_stats: sequential compute at depth "<<currDepth<<std::endl);
for (size_t d=0;d<MAX_HEIGHT-currDepth;++d) {
FAA(&internalsAtDepth[d+currDepth], ts->internalsAtDepth[d]);
FAA(&leavesAtDepth[d+currDepth], ts->leavesAtDepth[d]);
FAA(&keysAtDepth[d+currDepth], ts->keysAtDepth[d]);
FAA(&keysInLeavesAtDepth[d+currDepth], ts->keysInLeavesAtDepth[d]);
FAA(&keysInInternalsAtDepth[d+currDepth], ts->keysInInternalsAtDepth[d]);
#ifdef TREE_STATS_BYTES_AT_DEPTH
FAA(&bytesAtDepth[d+currDepth], ts->bytesAtDepth[d]);
#endif
}
FAA(&sumOfKeys, ts->sumOfKeys);
delete ts;
}
// std::cout<<"currDepth="<<currDepth<<std::endl;
// std::cout<<(ix-ixStartOfDepth+1)<<" subtrees computed in parallel... addresses:"<<std::endl;
// for (int i=ixStartOfDepth;i<ix;++i) {
// std::cout<<" "<<qn[i]<<"[depth "<<qd[i]<<"]";
// }
// std::cout<<std::endl;
// compute stats for the top of the tree, ABOVE the parallel constructed subtrees.
std::cout<<"computing stats for the top of the tree (above the partitions)..."<<std::endl;
if (currDepth > 0) computeStats(handler, root, 0, currDepth - 1);
}
#else
computeStats(handler, root, 0);
#endif
if (freeHandler) delete handler;
}
size_t getInternalsAtDepth(size_t d) {
assert(d < MAX_HEIGHT);
return internalsAtDepth[d];
}
size_t getLeavesAtDepth(size_t d) {
assert(d < MAX_HEIGHT);
return leavesAtDepth[d];
}
size_t getNodesAtDepth(size_t d) {
assert(d < MAX_HEIGHT);
return getInternalsAtDepth(d) + getLeavesAtDepth(d);
}
size_t getHeight() {
size_t d=0;
while (d < MAX_HEIGHT && getNodesAtDepth(d) > 0) {
++d;
}
return d;
}
size_t getInternals() {
size_t maxDepth = getHeight();
size_t result = 0;
for (size_t d=0;d<maxDepth;++d) {
result += getInternalsAtDepth(d);
}
return result;
}
size_t getLeaves() {
size_t maxDepth = getHeight();
size_t result = 0;
for (size_t d=0;d<maxDepth;++d) {
result += getLeavesAtDepth(d);
}
return result;
}
size_t getNodes() {
return getInternals() + getLeaves();
}
size_t getPointersAtDepth(size_t d) {
assert(d+1 < MAX_HEIGHT);
return getNodesAtDepth(d+1);
}
size_t getKeysAtDepth(size_t d) {
assert(d < MAX_HEIGHT);
return keysAtDepth[d];
}
size_t getKeys() {
size_t maxDepth = getHeight();
size_t result = 0;
for (size_t d=0;d<maxDepth;++d) {
result += getKeysAtDepth(d);
}
return result;
}
size_t getKeysInLeaves() {
size_t maxDepth = getHeight();
size_t result = 0;
for (size_t d=0;d<maxDepth;++d) {
result += keysInLeavesAtDepth[d];
}
return result;
}
size_t getKeysInInternals() {
size_t maxDepth = getHeight();
size_t result = 0;
for (size_t d=0;d<maxDepth;++d) {
result += keysInInternalsAtDepth[d];
}
return result;
}
double getAverageDegreeLeavesAtDepth(size_t d) {
double denom = getLeavesAtDepth(d);
return (denom == 0) ? 0 : keysInLeavesAtDepth[d] / denom; // count leaf keys only, consistent with getAverageDegreeLeaves()
}
double getAverageDegreeLeaves() {
double denom = getLeaves();
return (denom == 0) ? 0 : getKeysInLeaves() / denom;
}
double getAverageDegreeInternalsAtDepth(size_t d) {
double denom = getInternalsAtDepth(d);
return (denom == 0) ? 0 : getPointersAtDepth(d) / denom;
}
double getAverageDegreeInternals() {
double denom = getInternals();
return (denom == 0) ? 0 : getNodes() / denom;
}
double getAverageDegreeAtDepth(size_t d) {
// double denom = getNodesAtDepth(d);
// return (getPointersAtDepth(d) + getKeysAtDepth(d)) / denom;
return (getPointersAtDepth(d) + keysInLeavesAtDepth[d]) / (double) getNodesAtDepth(d);
}
double getAverageDegree() {
double denom = getNodes();
return (denom == 0) ? 0 : (denom + getKeysInLeaves()) / denom;
}
double getAverageKeyDepth() {
size_t height = getHeight();
size_t sumDepths = 0;
for (size_t d=0;d<height;++d) {
sumDepths += keysAtDepth[d] * d;
}
double denom = getKeys();
return (denom == 0) ? 0 : sumDepths / denom;
}
#ifdef TREE_STATS_BYTES_AT_DEPTH
size_t getBytesAtDepth(size_t d) {
return bytesAtDepth[d];
}
size_t getSizeInBytes() {
size_t height = getHeight();
size_t bytes = 0;
for (size_t d=0;d<height;++d) {
bytes += bytesAtDepth[d];
}
return bytes;
}
#endif
size_t getSumOfKeys() {
return sumOfKeys;
}
std::string toString() {
std::stringstream ss;
size_t height = getHeight();
ss<<"tree_stats_numInternalsAtDepth=";
for (size_t d=0;d<height;++d) {
ss<<(d?" ":"")<<getInternalsAtDepth(d);
}
ss<<std::endl;
ss<<"tree_stats_numLeavesAtDepth=";
for (size_t d=0;d<height;++d) {
ss<<(d?" ":"")<<getLeavesAtDepth(d);
}
ss<<std::endl;
ss<<"tree_stats_numNodesAtDepth=";
for (size_t d=0;d<height;++d) {
ss<<(d?" ":"")<<getNodesAtDepth(d);
}
ss<<std::endl;
// ss<<"tree_stats_numPointersAtDepth=";
// for (size_t d=0;d<height;++d) {
// ss<<(d?" ":"")<<getPointersAtDepth(d);
// }
// ss<<std::endl;
ss<<"tree_stats_numKeysAtDepth=";
for (size_t d=0;d<height;++d) {
ss<<(d?" ":"")<<getKeysAtDepth(d);
}
ss<<std::endl;
// ss<<"tree_stats_avgDegreeLeavesAtDepth=";
// for (size_t d=0;d<height;++d) {
// ss<<(d?" ":"")<<getAverageDegreeLeavesAtDepth(d);
// }
// ss<<std::endl;
//
// ss<<"tree_stats_avgDegreeInternalsAtDepth=";
// for (size_t d=0;d<height;++d) {
// ss<<(d?" ":"")<<getAverageDegreeInternalsAtDepth(d);
// }
// ss<<std::endl;
ss<<"tree_stats_avgDegreeAtDepth=";
for (size_t d=0;d<height;++d) {
ss<<(d?" ":"")<<getAverageDegreeAtDepth(d);
}
ss<<std::endl;
ss<<std::endl;
ss<<"tree_stats_height="<<height<<std::endl;
ss<<"tree_stats_numInternals="<<getInternals()<<std::endl;
ss<<"tree_stats_numLeaves="<<getLeaves()<<std::endl;
ss<<"tree_stats_numNodes="<<getNodes()<<std::endl;
ss<<"tree_stats_numKeys="<<getKeys()<<std::endl;
ss<<std::endl;
ss<<"tree_stats_avgDegreeInternal="<<getAverageDegreeInternals()<<std::endl;
ss<<"tree_stats_avgDegreeLeaves="<<getAverageDegreeLeaves()<<std::endl;
ss<<"tree_stats_avgDegree="<<getAverageDegree()<<std::endl;
ss<<"tree_stats_avgKeyDepth="<<getAverageKeyDepth()<<std::endl;
#ifdef TREE_STATS_BYTES_AT_DEPTH
ss<<std::endl;
ss<<"tree_stats_bytesAtDepth=";
for (size_t d=0;d<height;++d) {
ss<<(d?" ":"")<<getBytesAtDepth(d);
}
ss<<std::endl;
ss<<"tree_stats_sizeInBytes="<<getSizeInBytes()<<std::endl;
#endif
return ss.str();
}
};
#endif
#endif /* TREE_STATS_H */
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <mxnet/c_api.h>
#include <mxnet/kvstore.h>
#include <ps/ps.h>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include <unordered_map> // store_, update_buf_, decomp_buf_ members below
#include <cmath> // std::floor / std::sqrt in DepairDataHandleType
#include "../profiler/profiler.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
#include <stdlib.h>
#include "kvstore_dist.h"
#ifndef FINE_GRAIN_MSG
#define FINE_GRAIN_MSG
#endif
#ifndef GRAD_RECOVERY
#define GRAD_RECOVERY
#endif
/* #ifndef SERVER_MLR
#define SERVER_MLR
#endif */
/* #ifndef RECV_RANDOM_DROP
#define RECV_RANDOM_DROP
#endif */
namespace mxnet {
namespace kvstore {
// maintain same order in frontend.
enum class CommandType {
kController, kSetMultiPrecision, kStopServer, kSyncMode,
kSetGradientCompression, kSetProfilerParams
};
enum class RequestType {
kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull
};
struct DataHandleType {
RequestType requestType;
int dtype;
};
/*!
* Uses Cantor pairing function to generate a unique number given two numbers.
* This number can also be inverted to find the unique pair whose Cantor value is this number.
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function
* \param requestType RequestType
* \param dtype integer
* \return Cantor value of arguments
*/
static int GetCommandType(RequestType requestType, int d) {
int m = static_cast<int>(requestType);
return (((m + d) * (m + d + 1)) / 2) + d;
}
/*!
* Unpairs Cantor value and finds the two integers used to pair.
* Then returns DataHandleType object with those numbers.
* \param cmd DataHandleCommand generated by GetCommandType function
* \return DataHandleType
*/
static DataHandleType DepairDataHandleType(int cmd) {
int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2);
int t = ((w * w) + w) / 2;
int y = cmd - t;
int x = w - y;
CHECK_GE(x, 0);
CHECK_GE(y, 0);
DataHandleType type;
type.requestType = static_cast<RequestType>(x);
type.dtype = y;
return type;
}
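/*
 * worked example: pairing RequestType::kRowSparsePushPull (= 1) with dtype 2
 * gives cmd = ((1 + 2) * (1 + 2 + 1)) / 2 + 2 = 8; unpairing 8 yields
 * w = floor((sqrt(65) - 1) / 2) = 3, t = 6, y = 8 - 6 = 2, x = 3 - 2 = 1,
 * i.e. {kRowSparsePushPull, 2}, so the mapping round-trips.
 */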
/**
* \brief executor runs a function using the thread called \ref Start
*/
class Executor {
public:
/**
* \brief start the executor
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
lk.unlock();
if (blk.f) {
blk.f();
blk.p->set_value();
} else {
blk.p->set_value(); break;
}
lk.lock();
}
}
/**
* \brief function
*/
typedef std::function<void()> Func;
/**
* \brief let the thread called \ref Start to exec a function. threadsafe
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
fut.wait();
}
/**
* \brief stop the thread, threadsafe
*/
void Stop() {
Exec(Func());
}
private:
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;
std::mutex mu_;
std::condition_variable cond_;
};
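/*
 * minimal usage sketch (illustrative; not part of the server itself):
 *   Executor ex;
 *   std::thread t([&ex] { ex.Start(); }); // consumer thread drains the queue
 *   ex.Exec([] { ... });                  // runs on the consumer thread; blocks until done
 *   ex.Stop();                            // enqueues an empty Func, making Start() return
 *   t.join();
 */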
class KVStoreDistServer {
public:
KVStoreDistServer() {
using namespace std::placeholders;
ps_server_ = new ps::KVServer<char>(0);
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
sync_mode_ = false;
gradient_compression_ = std::make_shared<GradientCompression>();
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
#ifdef FINE_GRAIN_MSG
enable_dgt = dmlc::GetEnv("ENABLE_DGT", false);
dgt_info = dmlc::GetEnv("DGT_INFO", false);
std::cout << "enable_dgt = " << enable_dgt << "dgt_info = " << dgt_info << std::endl;
#endif
}
~KVStoreDistServer() {
profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0));
delete ps_server_;
}
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
* \brief blocked until received the command \a kSyncMode
*/
void Run() {
exec_.Start();
}
private:
struct UpdateBuf {
std::vector<ps::KVMeta> request;
NDArray merged;
// temp_array is used to cast received values to float32 for computation if
// required; if GRAD_RECOVERY is defined, it also stores the pre-average gradient
NDArray temp_array;
#ifdef GRAD_RECOVERY
int update_num = 0;
#endif
};
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
CommandType recved_type = static_cast<CommandType>(recved.head);
switch (recved_type) {
case CommandType::kStopServer:
exec_.Stop();
break;
case CommandType::kSyncMode:
sync_mode_ = true;
break;
case CommandType::kSetGradientCompression:
gradient_compression_->DecodeParams(recved.body);
break;
case CommandType::kSetProfilerParams:
// last char is the type of profiler command
ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand>
(recved.body.back() - '0'),
recved.body);
break;
case CommandType::kSetMultiPrecision:
// uses value 1 for message id from frontend
if (!multi_precision_) {
multi_precision_ = true;
CreateMultiPrecisionCopies();
}
break;
case CommandType::kController:
// this uses value 0 for message id from frontend
// let the main thread to execute ctrl, which is necessary for python
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
break;
}
app->Response(recved);
}
/*
* For keys that are already initialized, create stored_realt if necessary.
* This is only needed if, through incorrect usage of kvstore, some keys
* were initialized before the optimizer was set.
*/
void CreateMultiPrecisionCopies() {
for (auto const &stored_entry : store_) {
const int key = stored_entry.first;
const NDArray &stored = stored_entry.second;
if (stored.dtype() != mshadow::kFloat32) {
auto &stored_realt = store_realt_[key];
if (stored.storage_type() == kRowSparseStorage) {
stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(),
true, mshadow::kFloat32);
} else {
stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32);
}
auto &update = update_buf_[key];
if (!update.merged.is_none()) {
if (update.merged.storage_type() == kRowSparseStorage) {
update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(),
true, mshadow::kFloat32);
} else {
update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false,
mshadow::kFloat32);
}
}
CHECK(update.request.size() == 0)
<< ps::MyRank() << "Multiprecision mode can not be set while pushes are underway."
<< "Please set optimizer before pushing keys." << key << " " << update.request.size();
CopyFromTo(stored, stored_realt);
}
}
for (auto const &stored_realt_entry : store_realt_) {
stored_realt_entry.second.WaitToRead();
}
}
void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) {
switch (type) {
case KVStoreServerProfilerCommand::kSetConfig:
SetProfilerConfig(body.substr(0, body.size() - 1));
break;
case KVStoreServerProfilerCommand::kState:
MXSetProfilerState(static_cast<int>(body.front() - '0'));
break;
case KVStoreServerProfilerCommand::kPause:
MXProfilePause(static_cast<int>(body.front() - '0'));
break;
case KVStoreServerProfilerCommand::kDump:
MXDumpProfile(static_cast<int>(body.front() - '0'));
break;
}
}
void SetProfilerConfig(std::string params_str) {
std::vector<std::string> elems;
mxnet::kvstore::split(params_str, ',', std::back_inserter(elems));
std::vector<const char*> ckeys;
std::vector<const char*> cvals;
ckeys.reserve(elems.size());
cvals.reserve(elems.size());
for (size_t i=0; i < elems.size(); i++) {
std::vector<std::string> parts;
mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts));
CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker";
CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty";
CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0];
if (parts[0] == "filename") {
parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1];
}
char* ckey = new char[parts[0].length() + 1];
std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str());
ckeys.push_back(ckey);
char* cval = new char[parts[1].length() + 1];
std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str());
cvals.push_back(cval);
}
MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]);
for (size_t i=0; i < ckeys.size(); i++) {
delete[] ckeys[i];
delete[] cvals[i];
}
}
void DataHandleEx(const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
DataHandleType type = DepairDataHandleType(req_meta.cmd);
switch (type.requestType) {
case RequestType::kRowSparsePushPull:
DataHandleRowSparse(type, req_meta, req_data, server);
break;
case RequestType::kCompressedPushPull:
DataHandleCompressed(type, req_meta, req_data, server);
break;
case RequestType::kDefaultPushPull:
DataHandleDefault(type, req_meta, req_data, server);
break;
}
}
inline bool has_multi_precision_copy(const DataHandleType type) {
return multi_precision_ && type.dtype != mshadow::kFloat32;
}
inline void ApplyUpdates(const DataHandleType type, const int key,
UpdateBuf *update_buf, ps::KVServer<char>* server) {
if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) {
// let the main thread to execute updater_, which is necessary for python
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array;
if (updater_) {
exec_.Exec([this, key, &update, &stored](){
CHECK(updater_);
updater_(key, update, &stored);
});
} else {
CHECK(sync_mode_) << "Updater needs to be set for async mode";
// if no updater, just copy
CopyFromTo(update_buf->merged, &stored);
}
if (log_verbose_) {
LOG(INFO) << "sent response to " << update_buf->request.size() << " workers";
}
for (const auto& req : update_buf->request) {
server->Response(req);
}
update_buf->request.clear();
if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]);
stored.WaitToRead();
} else {
update_buf->merged.WaitToRead();
}
}
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
const int64_t master_key, const int64_t num_rows) {
indices[0] = 0;
for (int64_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(keys[i]);
auto row_id = key - master_key;
indices[i - 1] = row_id;
}
}
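// e.g. (illustrative): if keys[1..2] decode to master_key + 3 and master_key + 7,
// this yields indices = {3, 7} -- each trailing key encodes master_key + row_id,
// while keys[0] is the master key itself and carries no row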
void AccumulateRowSparseGrads(const DataHandleType type,
const NDArray& recved,
UpdateBuf* updateBuf) {
NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array);
const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved;
// accumulate row_sparse gradients
using namespace mshadow;
Engine::Get()->PushAsync(
[to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>(
{}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out});
on_complete();
}, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
CopyFromTo(out, &(updateBuf->merged), 0);
updateBuf->merged.WaitToRead();
}
void RowSparsePullResponse(const DataHandleType type,
const int master_key,
const size_t num_rows,
const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
if (log_verbose_) LOG(INFO) << "pull: " << master_key;
ps::KVPairs<char> response;
if (num_rows == 0) {
std::vector<int> lens(req_data.keys.size(), 0);
response.keys = req_data.keys;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
return;
}
const NDArray& stored = store_[master_key];
if (has_multi_precision_copy(type)) stored.WaitToRead();
CHECK(!stored.is_none()) << "init " << master_key << " first";
auto shape = stored.shape();
auto unit_len = shape.ProdShape(1, shape.ndim());
const int num_bytes = mshadow::mshadow_sizeof(type.dtype);
const int unit_size = unit_len * num_bytes;
const char* data = static_cast<char *> (stored.data().dptr_);
auto len = num_rows * unit_size;
// concat values
response.vals.resize(len);
#pragma omp parallel for
for (size_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(req_data.keys[i]);
int64_t row_id = key - master_key;
const auto src = data + row_id * unit_size;
auto begin = (i - 1) * unit_size;
auto end = i * unit_size;
response.vals.segment(begin, end).CopyFrom(src, unit_size);
}
// setup response
response.keys = req_data.keys;
std::vector<int> lens(req_data.keys.size(), unit_len);
lens[0] = 0;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
}
void InitRowSparseStored(const DataHandleType type,
const int master_key,
const size_t num_rows,
const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key];
int dtype = type.dtype;
int num_bytes = mshadow::mshadow_sizeof(dtype);
auto unit_len = req_data.lens[1] / num_bytes;
CHECK_GT(unit_len, 0);
size_t ds[] = {num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
})
NDArray recved = NDArray(recv_blob, 0);
stored = NDArray(kRowSparseStorage, dshape, Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
if (has_multi_precision_copy(type)) {
store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype);
}
Engine::Get()->PushAsync(
[this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) {
NDArray rsp = stored;
stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
using namespace mxnet::op;
nnvm::dim_t nnr = rsp.shape()[0];
MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, {
IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>();
mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx);
});
TBlob rsp_data = rsp.data();
// copies or casts as appropriate
ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext());
on_complete();
}, recved.ctx(), {recved.var()}, {stored.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
if (has_multi_precision_copy(type)) {
CopyFromTo(stored, store_[master_key]);
store_[master_key].WaitToRead();
}
stored.WaitToRead();
server->Response(req_meta);
}
void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta,
const ps::KVPairs<char>& req_data,
ps::KVServer<char>* server) {
int master_key = DecodeKey(req_data.keys[0]);
auto num_rows = req_data.keys.size() - 1;
auto& stored = store_[master_key];
if (req_meta.push) {
CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
CHECK_EQ(req_data.lens[0], 0);
if (stored.is_none()) {
if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
// initialization
CHECK_GT(num_rows, 0) << "init with empty data is not supported";
InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server);
return;
} else {
if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys;
auto& updates = update_buf_[master_key];
if (sync_mode_ && updates.merged.is_none()) {
updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
}
if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false,
mshadow::kFloat32);
}
if (num_rows == 0) {
if (sync_mode_) {
if (updates.request.empty()) {
// reset to zeros
int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype;
updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(),
true, merged_dtype);
} // else nothing to aggregate
updates.request.push_back(req_meta);
ApplyUpdates(type, master_key, &updates, server);
} else {
server->Response(req_meta);
}
} else {
auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype);
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
// data
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()),
dshape, cpu::kDevMask);
})
// row_sparse NDArray
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
if (updates.request.empty()) {
if (sync_mode_) {
CopyFromTo(recved, updates.merged);
} else {
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
} else {
updates.temp_array = recved;
}
}
} else {
CHECK(sync_mode_);
AccumulateRowSparseGrads(type, recved, &updates);
}
updates.request.push_back(req_meta);
ApplyUpdates(type, master_key, &updates, server);
}
}
} else {
// pull
RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server);
}
}
void DefaultStorageResponse(const DataHandleType type,
const int key,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
ps::KVPairs<char> response;
const NDArray& stored = store_[key];
CHECK(!stored.is_none()) << "init " << key << " first";
// as server returns when store_realt is ready in this case
if (has_multi_precision_copy(type)) stored.WaitToRead();
auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype());
response.keys = req_data.keys;
response.lens = {len};
// TODO(mli) try to remove this CopyFrom
response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len);
server->Response(req_meta, response);
}
void DataHandleCompressed(const DataHandleType type,
const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
CHECK_EQ(type.dtype, mshadow::kFloat32)
<< "Gradient compression is currently supported for fp32 only";
if (req_meta.push) {
// several WaitToRead calls are used here because \a recved's memory could be
// deallocated when this function returns, so we need to make sure the
// operators working on that \a NDArray have actually finished
// the first key is a dummy key encoding the original (uncompressed) array size; its len is 0
CHECK_EQ(req_data.keys.size(), (size_t)2);
CHECK_EQ(req_data.lens.size(), (size_t)2);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
int original_size = DecodeKey(req_data.keys[0]);
int key = DecodeKey(req_data.keys[1]);
auto& stored = store_[key];
size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)};
TShape dshape(ds, ds + 1);
TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
NDArray decomp_buf = decomp_buf_[key];
dshape = TShape{(int64_t) original_size};
if (decomp_buf.is_none()) {
decomp_buf = NDArray(dshape, Context());
}
if (stored.is_none()) {
stored = NDArray(dshape, Context());
gradient_compression_->Dequantize(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
auto& merged = update_buf_[key];
if (merged.merged.is_none()) {
merged.merged = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
gradient_compression_->Dequantize(recved, &merged.merged, 0);
} else {
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
merged.merged += decomp_buf;
}
merged.request.push_back(req_meta);
ApplyUpdates(type, key, &merged, server);
} else {
// async push
gradient_compression_->Dequantize(recved, &decomp_buf, 0);
exec_.Exec([this, key, &decomp_buf, &stored]() {
CHECK(updater_);
updater_(key, decomp_buf, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else { // pull
CHECK_EQ(req_data.keys.size(), (size_t)1);
CHECK_EQ(req_data.lens.size(), (size_t)0);
int key = DecodeKey(req_data.keys[0]);
DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta,
const ps::KVPairs<char> &req_data,
ps::KVServer<char>* server) {
// do some check
CHECK_EQ(req_data.keys.size(), (size_t)1);
if (req_meta.push) {
CHECK_EQ(req_data.lens.size(), (size_t)1);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
}
int key = DecodeKey(req_data.keys[0]);
auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key];
// several WaitToRead calls are used here because \a recved's memory could be
// deallocated when this function returns, so we need to make sure the
// operators working on that \a NDArray have actually finished
if (req_meta.push) {
size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)};
TShape dshape(ds, ds + 1);
TBlob recv_blob;
MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, {
recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask);
})
NDArray recved = NDArray(recv_blob, 0);
if (stored.is_none()) {
// initialization
stored = NDArray(dshape, Context(), false,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
CopyFromTo(recved, &stored, 0);
server->Response(req_meta);
if (has_multi_precision_copy(type)) {
auto& stored_dtype = store_[key];
stored_dtype = NDArray(dshape, Context(), false, type.dtype);
CopyFromTo(stored, stored_dtype);
stored_dtype.WaitToRead();
}
stored.WaitToRead();
} else {
auto &updates = update_buf_[key];
if (sync_mode_ && updates.merged.is_none()) {
updates.merged = NDArray(dshape, Context(), false,
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype);
}
if (has_multi_precision_copy(type) && updates.temp_array.is_none()) {
updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32);
}
if (updates.request.empty()) {
if (sync_mode_) {
CopyFromTo(recved, updates.merged);
} else {
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
} else {
updates.temp_array = recved;
}
}
} else {
CHECK(sync_mode_);
if (has_multi_precision_copy(type)) {
CopyFromTo(recved, updates.temp_array);
updates.merged += updates.temp_array;
} else {
updates.merged += recved;
}
}
updates.request.push_back(req_meta);
ApplyUpdates(type, key, &updates, server);
}
} else {
DefaultStorageResponse(type, key, req_meta, req_data, server);
}
}
int DecodeKey(ps::Key key) {
auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
return key - kr.begin();
}
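// Illustrative example (hypothetical numbers): if this server's key range
// begins at 1000, an incoming ps::Key of 1003 decodes to local key 3, i.e.
// the index used for the store_ / update_buf_ lookups above.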
std::mutex mu_;
/**
* \brief user defined mode for push
*/
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
* \brief store_ contains the value at kvstore for each key
*/
std::unordered_map<int, NDArray> store_;
std::unordered_map<int, NDArray> store_realt_;
/**
* \brief update_buf_ is a buffer used if sync_mode is true. It holds the
* values from different workers as they are merged. The store is updated
* to this value once values from all workers have been pushed into this buffer.
*/
std::unordered_map<int, UpdateBuf> update_buf_;
#ifdef FINE_GRAIN_MSG
bool enable_dgt = false;
bool dgt_info = false;
#endif
#ifdef SERVER_MLR
std::unordered_map<int,float> arrive_rate;
#endif
/**
* \brief decomp_buf_ is a buffer into which compressed values are
* decompressed before merging to the store. used when compress_!='none'
*/
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<char>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/**
* \brief whether to use multi precision mode.
* In multi precision mode, all weights are stored as float32.
* Any gradient received is cast to float32 before accumulation and the
* weight update.
*/
bool multi_precision_;
/**
* \brief gradient compression object.
* Starts as none; used after SetGradientCompression sets the type.
* There is currently no support for unsetting gradient compression.
*/
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
/// \brief Kind of the directive.
OpenMPDirectiveKind Kind;
/// \brief Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// \brief Ending location of the directive.
SourceLocation EndLoc;
/// \brief Numbers of clauses.
const unsigned NumClauses;
/// \brief Number of child expressions/stmts.
const unsigned NumChildren;
/// \brief Offset from this to the start of clauses.
/// There are NumClauses pointers to clauses, they are followed by
/// NumChildren pointers to child stmts/exprs (if the directive type
/// requires an associated stmt, then it has to be the first of them).
const unsigned ClausesOffset;
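/// An illustrative sketch of the resulting layout for a directive type T
/// with 2 clauses and 1 child stmt:
///
/// \code
///   [ T object ][ OMPClause*, OMPClause* ][ Stmt* ]
///   ^ this      ^ this + ClausesOffset
/// \endcode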
/// \brief Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
reinterpret_cast<char *>(this) + ClausesOffset);
return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
}
protected:
/// \brief Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
template <typename T>
OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses, unsigned NumChildren)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
NumChildren(NumChildren),
ClausesOffset(llvm::RoundUpToAlignment(sizeof(T),
llvm::alignOf<OMPClause *>())) {}
/// \brief Sets the list of clauses for this directive.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// \brief Set the associated statement for the directive.
///
/// \param S Associated statement.
///
void setAssociatedStmt(Stmt *S) {
assert(hasAssociatedStmt() && "no associated statement.");
*child_begin() = S;
}
public:
/// \brief Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only those clauses that satisfy the given run-time
/// predicate (see the usage sketch after this class).
template <class FilterPredicate> class filtered_clause_iterator {
ArrayRef<OMPClause *>::const_iterator Current;
ArrayRef<OMPClause *>::const_iterator End;
FilterPredicate Pred;
void SkipToNextClause() {
while (Current != End && !Pred(*Current))
++Current;
}
public:
typedef const OMPClause *value_type;
filtered_clause_iterator() : Current(), End() {}
filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred)
: Current(Arr.begin()), End(Arr.end()), Pred(Pred) {
SkipToNextClause();
}
value_type operator*() const { return *Current; }
value_type operator->() const { return *Current; }
filtered_clause_iterator &operator++() {
++Current;
SkipToNextClause();
return *this;
}
filtered_clause_iterator operator++(int) {
filtered_clause_iterator tmp(*this);
++(*this);
return tmp;
}
bool operator!() { return Current == End; }
operator bool() { return Current != End; }
};
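/// A usage sketch for the iterator above (illustrative; ClauseKindIs and
/// handleClause are hypothetical):
///
/// \code
///   filtered_clause_iterator<ClauseKindIs> I(D->clauses(),
///                                            ClauseKindIs(OMPC_private));
///   for (; I; ++I)
///     handleClause(*I);
/// \endcode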
/// \brief Returns starting location of directive kind.
SourceLocation getLocStart() const { return StartLoc; }
/// \brief Returns ending location of directive.
SourceLocation getLocEnd() const { return EndLoc; }
/// \brief Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// \brief Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// \brief Get number of clauses.
unsigned getNumClauses() const { return NumClauses; }
/// \brief Returns specified clause.
///
/// \param i Number of clause.
///
OMPClause *getClause(unsigned i) const { return clauses()[i]; }
/// \brief Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return NumChildren > 0; }
/// \brief Returns statement associated with the directive.
Stmt *getAssociatedStmt() const {
assert(hasAssociatedStmt() && "no associated statement.");
return const_cast<Stmt *>(*child_begin());
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
child_range children() {
if (!hasAssociatedStmt())
return child_range();
Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
return child_range(ChildStorage, ChildStorage + NumChildren);
}
ArrayRef<OMPClause *> clauses() { return getClauses(); }
ArrayRef<OMPClause *> clauses() const {
return const_cast<OMPExecutableDirective *>(this)->getClauses();
}
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
///
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPExecutableDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
EndLoc, NumClauses, 1),
CollapsedNum(CollapsedNum) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPExecutableDirective(this, OMPSimdDirectiveClass, OMPD_simd,
SourceLocation(), SourceLocation(), NumClauses,
1),
CollapsedNum(CollapsedNum) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
unsigned getCollapsedNumber() const { return CollapsedNum; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPExecutableDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc,
EndLoc, NumClauses, 1),
CollapsedNum(CollapsedNum) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPExecutableDirective(this, OMPForDirectiveClass, OMPD_for,
SourceLocation(), SourceLocation(), NumClauses,
1),
CollapsedNum(CollapsedNum) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
unsigned getCollapsedNumber() const { return CollapsedNum; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPSectionDirective()
: OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPSingleDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPMasterDirective()
: OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPMasterDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterDirectiveClass;
}
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Name of the directive.
DeclarationNameInfo DirName;
/// \brief Build directive with the given start and end location.
///
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
StartLoc, EndLoc, 0, 1),
DirName(Name) {}
/// \brief Build an empty directive.
///
explicit OMPCriticalDirective()
: OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
SourceLocation(), SourceLocation(), 0, 1),
DirName() {}
/// \brief Set name of the directive.
///
/// \param Name Name of the directive.
///
void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPCriticalDirective *
Create(const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// \brief Return name of the directive.
///
DeclarationNameInfo getDirectiveName() const { return DirName; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCriticalDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelForDirectiveClass,
OMPD_parallel_for, StartLoc, EndLoc, NumClauses,
1),
CollapsedNum(CollapsedNum) {}
/// \brief Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelForDirectiveClass,
OMPD_parallel_for, SourceLocation(),
SourceLocation(), NumClauses, 1),
CollapsedNum(CollapsedNum) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
unsigned getCollapsedNumber() const { return CollapsedNum; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForDirectiveClass;
}
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, StartLoc, EndLoc,
NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPParallelSectionsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
OMPD_parallel_sections, SourceLocation(),
SourceLocation(), NumClauses, 1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
}
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTaskDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments: variables 'a'
/// and 'b'.
/// The 'omp flush' directive has no clauses, but it may have an optional list
/// of variables to flush. This list of variables is stored within a fake
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses (only single OMPFlushClause clause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
} // end namespace clang
#endif
|
cgadither.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "common.h"
// FIXME backport fixes to dither_s and dither_m_c here
extern int opt_alt;
static const float PI = 3.14159265358979f; /* acos(-1) is not a constant expression in ISO C */
typedef struct {
short hue;
float saturation, value;
} hsv_t;
typedef struct {
size_t w, h;
hsv_t* pixels;
} hsvimg_t;
#define MAX(A, B, C) (((A) >= (B) && (A) >= (C)) ? (A) : ((B) >= (A) && (B) >= (C)) ? (B) : (C))
#define MIN(A, B, C) (((A) <= (B) && (A) <= (C)) ? (A) : ((B) <= (A) && (B) <= (C)) ? (B) : (C))
static inline hsv_t _toHSL(pixel_t ip)
{
hsv_t ret;
float r = (float)ip.r / 255.f, g = (float)ip.g / 255.f, b = (float)ip.b / 255.f;
float max = MAX(r, g, b);
float min = MIN(r, g, b);
float ax, bx;
ret.value = (max + min) / 2.f;
if(max == min){
ret.hue = ret.saturation = 0.f; // achromatic
}else{
float C = max - min;
ret.saturation = C / (1.f - fabs(2.f * ret.value - 1.f));
if(max == r) {
ret.hue = 60.f * fmodf((g - b) / C, 6.f);
} else if(max == g) {
ret.hue = 60.f * ((b - r) / C + 2.f);
} else {
ret.hue = 60.f * ((r - g) / C + 4.f);
}
}
// BLOCK: from HSV to HSL
ax = ret.value * (1.f - ret.saturation / 2.f);
bx = (ax < 1 - ax) ? ax : 1 - ax;
if(bx >= 0.00001f) bx = (ret.value - ax) / bx;
else bx = 0.f;
ret.value = ax;
ret.saturation = bx;
// END BLOCK
return ret;
}
static inline pixel_t _fromHSL(hsv_t p)
{
struct { float r, g, b; } ret;
float ax, bx;
// BLOCK: from HSL to HSV
bx = (p.value < 1.f - p.value) ? p.value : 1.f - p.value;
ax = p.value + p.saturation * bx;
if(bx == 0.f) p.saturation = 0.f;
else p.saturation = 2.f * (1.f - p.value / bx);
// END BLOCK
if(p.saturation == 0.f) {
pixel_t ret = { p.value * 255.f, p.value * 255.f, p.value * 255.f };
return ret;
}
float C = (1.f - fabsf(2.f * p.value - 1.f)) * p.saturation;
float X = C * (1.f - fabsf(fmodf(p.hue / 60.f, 2.f) - 1.f));
float m = 1.f * (p.value - 0.5f * C);
if(p.hue < 0.f) {
ret.r = ret.g = ret.b = m;
} else if(p.hue < 60.f) {
ret.r = C + m;
ret.g = X + m;
ret.b = m;
} else if(p.hue < 120.f) {
ret.r = X + m;
ret.g = C + m;
ret.b = m;
} else if(p.hue < 180.f) {
ret.r = m;
ret.g = C + m;
ret.b = X + m;
} else if(p.hue < 240.f) {
ret.r = m;
ret.g = X + m;
ret.b = C + m;
} else if(p.hue < 300.f) {
ret.r = X + m;
ret.g = m;
ret.b = C + m;
} else if(p.hue < 360.f) {
ret.r = C + m;
ret.g = m;
ret.b = X + m;
} else {
ret.r = ret.g = ret.b = m;
}
pixel_t realRet = {
ret.r * 255.f,
ret.g * 255.f,
ret.b * 255.f
};
return realRet;
}
typedef struct {
size_t i;
union {
img_t asRGB;
hsvimg_t asHSV;
} in;
union {
img_t asRGB;
hsvimg_t asHSV;
} out;
} tdata_t;
typedef struct {
size_t i;
union {
img_t asRGB;
hsvimg_t asHSV;
} img;
uint8_t* isMagenta;
uint8_t* isGray;
uint8_t* isWhite;
int* randomness;
} tddata_t;
static void _proc_toHSL(void* data)
{
tdata_t* mydata = (tdata_t*)data;
size_t j;
for(j = 0; j < mydata->in.asRGB.w; ++j) {
A(mydata->out.asHSV, mydata->i, j) =
_toHSL(A(mydata->in.asRGB, mydata->i, j));
}
}
static void _normalize(hsvimg_t img)
{
float min = 1.f, max = 0.f;
float mins = 1.f, maxs = 0.f;
size_t i, j;
// get normalization extents
for(i = 0; i < img.h; ++i) {
for(j = 0; j < img.w; ++j) {
if(A(img, i, j).value < min) min = A(img, i, j).value;
if(A(img, i, j).value > max) max = A(img, i, j).value;
if(A(img, i, j).saturation < mins) mins = A(img, i, j).saturation;
if(A(img, i, j).saturation > maxs) maxs = A(img, i, j).saturation;
}
}
// normalize and partition
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
size_t k;
for(k = 0; k < img.w; ++k) {
// map [min,max] and [mins,maxs] onto [0,1]; guard against flat ranges
if(max > min) A(img, i, k).value = (A(img, i, k).value - min) / (max - min);
if(maxs > mins) A(img, i, k).saturation = (A(img, i, k).saturation - mins) / (maxs - mins);
}
}
}
static inline void _genRandom(int n, int* a)
{
int i;
for(i = 0; i < n; ++i) {
a[i] = rand();
}
}
static void _dither_m_c(void* data)
{
#define MY(A, J) (mydata->A[mydata->img.asHSV.w*mydata->i+J])
tddata_t* mydata = (tddata_t*)data;
size_t j;
short dC, dM;
short ah, bh;
float median, Nu;
#define MAGENTA 300
#define CYAN 180
#define MINUS_MAGENTA 60
#define MINUS_CYAN 180
#define MINUS(s) (360 - s)
for(j = 0; j < mydata->img.asHSV.w; ++j) {
hsv_t p = A(mydata->img.asHSV, mydata->i, j);
ah = abs((p.hue + MINUS_CYAN) % 360);
bh = abs((CYAN + MINUS(p.hue)) % 360);
dC = (ah < bh) ? ah : bh;
ah = abs((p.hue + MINUS_MAGENTA) % 360);
bh = abs((MAGENTA + MINUS(p.hue)) % 360);
dM = (ah < bh) ? ah : bh;
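// e.g. (illustrative) p.hue = 200: dC = min(|200-180|, 360-|200-180|) = 20,
// dM = min(|200-300|, 360-|200-300|) = 100, so the pixel leans cyan and
// median below ends up small (magenta is rarely chosen).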
median = 1.f - ((float)dM) / ((float)dM + (float)dC);
Nu = (float)MY(randomness, j) / (float)RAND_MAX;
#if 0
//Nu = mydata->randomness[mydata->img.asHSV.w * mydata->i + j] / RAND_MAX;
MY(isMagenta, j) = (Nu < median);
//mydata->isMagenta[mydata->img.asHSV.w * mydata->i + j] = (Nu < median);
#endif
float x = (Nu < median) ? sqrtf(Nu * median) : (1.f - sqrtf((1.f-Nu)*(1.f - median)));
median = 2.f * median - 1.f;
x = 2.f * x - 1.f;
MY(isMagenta, j) = median + x > 0.f;
}
#undef MAGENTA
#undef CYAN
#undef MY
}
static void _dither_s(void* data)
{
#define MY(A, J) (mydata->A[mydata->img.asHSV.w*mydata->i+J])
tddata_t* mydata = (tddata_t*)data;
size_t j;
float median, Nu;
for(j = 0; j < mydata->img.asHSV.w; ++j) {
hsv_t p = A(mydata->img.asHSV, mydata->i, j);
#if 0
median = 1.f - p.saturation;
Nu = (float)MY(randomness, j) / (float)RAND_MAX;
//Nu = mydata->randomness[mydata->img.asHSV.w * mydata->i + j] / RAND_MAX;
MY(isGray, j) = (Nu < median + 0.00001f);
//mydata->isGray = (Nu < median + 0.00001f);
#else
Nu = (float)MY(randomness, j) / (float)RAND_MAX;
median = 1.f - p.saturation; // c
float x = (Nu < median) ? sqrtf(Nu * median) : (1.f - sqrtf((1.f-Nu)*(1.f - median)));
x = 2.f * x - 1.f;
median = 2.f * median - 1.f;
MY(isGray, j) = median + x > 0.f;
#endif
}
#undef MY
}
static void _dither_l(void* data)
{
#define MY(A, J) (mydata->A[mydata->img.asHSV.w*mydata->i+J])
tddata_t* mydata = (tddata_t*)data;
size_t j;
float median, Nu;
for(j = 0; j < mydata->img.asHSV.w; ++j) {
hsv_t p = A(mydata->img.asHSV, mydata->i, j);
#if 0
median = 1.f - (1.f - p.value); // the formula says 1-X, but we need to reverse the logic
Nu = (float)MY(randomness, j) / (float)RAND_MAX;
//Nu = mydata->randomness[mydata->img.asHSV.w * mydata->i + j] / RAND_MAX;
MY(isWhite, j) = (Nu < median + 0.00001f);
//mydata->isWhite = (Nu < median + 0.00001f);
#endif
Nu = (float)MY(randomness, j) / (float)RAND_MAX;
median = 1.f - (1.f - p.value); // c
float x = (Nu < median) ? sqrtf(Nu * median) : (1.f - sqrtf((1.f-Nu)*(1.f - median)));
x = 2.f * x - 1.f;
median = 2.f * median - 1.f;
MY(isWhite, j) = median + x > 0.f;
}
#undef MY
}
static void _output_layer(void* data)
{
#define MY(A, J) (mydata->A[mydata->img.asRGB.w*mydata->i+J])
tddata_t* mydata = (tddata_t*)data;
size_t j;
float median, Nu;
for(j = 0; j < mydata->img.asRGB.w; ++j) {
pixel_t p = A(mydata->img.asRGB, mydata->i, j);
if(MY(isGray, j)) {
if(MY(isWhite, j)) {
p.r = p.g = 255;
p.b = (!opt_alt) * 255;
} else {
// black is always black :-)
p.r = p.g = p.b = 0;
}
} else {
if(MY(isMagenta, j)) {
p.r = 255; p.g = 0; p.b = (!opt_alt) * 255;
} else {
p.r = 0; p.g = 255; p.b = (!opt_alt) * 255;
}
}
#if 0
printf("%3dx%3d: MGW, RGB = \n\t%d %d %d\n\t%x %x %x\n",
mydata->i, j,
MY(isMagenta, j), MY(isGray, j), MY(isWhite, j),
p.r, p.g, p.b);
#endif
A(mydata->img.asRGB, mydata->i, j) = p;
}
#undef MY
}
/*
Multi-step Algorithm
No. Step Colour Space
0. in RGB (r[], g[], b[])
1. RGB -> HSL (h[], s[], l[])
2. dither_m_c (isMagenta?, s[], l[])
3. dither_s (isMagenta?, isGray?, l[])
4. dither_l (isMagenta?, isGray?, isWhite?)
5. output (r[], g[], b[])
isGray?
isWhite?
out = FFFFFF
else
out = 000000
else
isMagenta?
out = FF00FF
else
out = 00FFFF
*/
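/* Illustrative driver sketch (hypothetical I/O helpers; img_t comes from
   common.h, and the loaders below do not exist in this file):

     img_t in = load_image("photo.ppm");   // hypothetical loader
     img_t out = cgadither(in);            // runs steps 1-5 above
     save_image("photo_cga.ppm", out);     // hypothetical writer
     free(in.pixels); free(out.pixels);
*/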
img_t cgadither(img_t const img)
{
img_t ret = { img.w, img.h, (pixel_t*)malloc(img.w * img.h * sizeof(pixel_t)) };
hsvimg_t hsvimg = { img.w, img.h, (hsv_t*)malloc(img.w * img.h * sizeof(hsv_t)) };
size_t i, j;
tdata_t* datas;
tddata_t* ddatas;
// randomness for our threads
int* randomness = (int*)malloc(img.w * img.h * sizeof(int));
// make it predictable
srand(0);
// process
// 1. toHSL
datas = (tdata_t*)malloc(img.h * sizeof(tdata_t));
ddatas = (tddata_t*)malloc(img.h * sizeof(tddata_t));
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
tdata_t* data = &datas[i];
data->i = i;
data->in.asRGB = img;
data->out.asHSV = hsvimg;
_proc_toHSL(data);
}
// 1.5 normalize to get a more full colour space
_normalize(hsvimg);
// bulk processing
// 2. dither colors to magenta or cyan
_genRandom(img.w * img.h, randomness);
uint8_t* isMagenta = (uint8_t*)malloc(img.w * img.h * sizeof(uint8_t));
uint8_t* isGray = (uint8_t*)malloc(img.w * img.h * sizeof(uint8_t));
uint8_t* isWhite = (uint8_t*)malloc(img.w * img.h * sizeof(uint8_t));
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
tddata_t* data = &ddatas[i];
data->randomness = randomness;
data->i = i;
data->img.asHSV = hsvimg;
data->isMagenta = isMagenta;
_dither_m_c(data);
}
// 3. dither saturation to color or gray
_genRandom(img.w * img.h, randomness);
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
tddata_t* data = &ddatas[i];
data->randomness = randomness;
data->i = i;
data->img.asHSV = hsvimg;
data->isGray = isGray;
_dither_s(data);
}
// 4. dither luma to white or black
_genRandom(img.w * img.h, randomness);
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
tddata_t* data = &ddatas[i];
data->randomness = randomness;
data->i = i;
data->img.asHSV = hsvimg;
data->isWhite = isWhite;
_dither_l(data);
}
// 5. distribute pixels to C, M, black or white
#pragma omp parallel for
for(i = 0; i < img.h; ++i) {
tddata_t* data = &ddatas[i];
data->i = i;
data->img.asRGB = ret;
data->isMagenta = isMagenta;
data->isGray = isGray;
data->isWhite = isWhite;
_output_layer(data);
}
free(randomness);
free(isMagenta);
free(isGray);
free(isWhite);
free(ddatas);
free(datas);
free(hsvimg.pixels);
return ret;
}
|
progress.h | #pragma once
#include <iostream>
#include <sstream>
#include <string>
inline std::string center(std::string const &s, int n)
{
int l = s.length();
if (l >= n) return s; // avoid negative padding widths
std::ostringstream o;
o << std::string((n - l) / 2, ' ') << s
<< std::string((n - (n - l)/2 - l), ' ');
return o.str();
}
class ProgressBar
{
size_t N, i, j;
std::string txt;
public:
ProgressBar(int N_, std::string const &txt_ = "crunching ..."):
N(N_), i(0), j(0), txt(center(txt_, 50))
{
draw();
}
void tic()
{
#pragma omp critical
{
++i;
if (i > N) i = N;
else if (i * 50 / N > j)
{
++j;
draw();
}
}
}
void finish()
{
j = 50;
draw();
std::cerr << std::endl;
}
void draw() const
{
std::cerr << "\r\033[m(\033[44;33;1m"
<< txt.substr(0, j) << "\033[m"
<< txt.substr(j, 50) << ")";
}
};
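// Usage sketch (illustrative; do_work is hypothetical). tic() is wrapped in
// an omp critical section, so the bar can be advanced from a parallel loop:
//
//   ProgressBar bar(n, "crunching ...");
//   #pragma omp parallel for
//   for (int i = 0; i < n; ++i) { do_work(i); bar.tic(); }
//   bar.finish();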
// not completely right ...
template <typename I>
class ProgressIterator: public I
{
ProgressBar B;
public:
ProgressIterator(I b, int N, std::string const &txt = "iterating ..."):
I(b), B(N, txt)
{}
~ProgressIterator()
{
B.finish();
}
ProgressIterator &operator++()
{
I::operator++();
B.tic();
return *this;
}
};
|
sum_array_in_parallel.c | // sum_array_in_parallel.c
// compile with: /openmp (MSVC) or -fopenmp (gcc)
/* #############################################################################
## DESCRIPTION: Vector sum in parallel with OpenMP.
## NAME: sum_array_in_parallel.c
## AUTHOR: Lucca Pessoa da Silva Matos
## DATE: 10.04.2020
## VERSION: 1.0
## EXAMPLE:
## PS C:\> gcc -fopenmp -o sum_array_in_parallel sum_array_in_parallel.c
##############################################################################*/
// =============================================================================
// LIBRARIES
// =============================================================================
#include <omp.h>
#include <stdio.h>
#include <locale.h>
#include <stdlib.h>
// =============================================================================
// MACROS
// =============================================================================
#define MAX 100
#define LOOP(i, n) for(int i = 0; i < n; i++)
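// e.g. LOOP(i, MAX) expands to: for(int i = 0; i < 100; i++) - the index is
// declared inside the macro, so each loop gets its own i.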
// =============================================================================
// CALL FUNCTIONS
// =============================================================================
void cabecalho();
void set_portuguese();
// =============================================================================
// MAIN
// =============================================================================
int main(int argc, char const *argv[]){
set_portuguese();
cabecalho();
int *A, *B, *C;
//Allocating and initializing the arrays (the LOOP macro declares its own index)
printf("\nAllocating and initializing the arrays...");
A = (int *)malloc(MAX*sizeof(int));
B = (int *)malloc(MAX*sizeof(int));
C = (int *)malloc(MAX*sizeof(int));
//Initializing the values to be summed
printf("\n\nFilling the arrays with the data to be summed");
LOOP(i, MAX){
A[i] = i * 2;
B[i] = i * 3;
}
//Displaying values
printf("\n\nDisplaying the values of arrays A and B...\n\n");
LOOP(i, MAX){
printf("\t%d \t %d\n", A[i], B[i]);
}
printf("\nEstamos fora do contextos paralelo... Iremos realizar a soma dos vetores em paralelo...\n");
//Realizando soma em paralelo - Usamos o shared para compartilhar os Arrays entre as Threads.
//Naturalmente uma Thread não pode acessar a cópia de outra Thread.
#pragma omp parallel for default(none) shared(A, B, C)
LOOP(i, MAX){
C[i] = A[i] + B[i];
}
printf("\nProcessamos a soma e saímos da região paralela...");
//Exibindo valores
printf("\n\nExibindo valores da soma dos Arrays...\n\n");
LOOP(i, MAX){
printf("\t%d\n", C[i]);
}
printf("\nFim do programa... Iremos dar um free nos Arrays alocados...\n\n");
//Free Arrays
free(A);
free(B);
free(C);
return 0;
}
// =============================================================================
// FUNCTIONS
// =============================================================================
void set_portuguese(){
setlocale(LC_ALL, "Portuguese");
}
void cabecalho(){
printf("\n**************************************************");
printf("\n* *");
printf("\n* *");
printf("\n* PROGRAMACAO PARALELA COM OPENMP - LUCCA PESSOA *");
printf("\n* *");
printf("\n* *");
printf("\n**************************************************\n");
}
|
vector_batched.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "seq_mv.h"
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassAxpy8
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy8( HYPRE_Complex *alpha,
hypre_Vector **x,
hypre_Vector *y, HYPRE_Int k)
{
HYPRE_Complex *x_data = hypre_VectorData(x[0]);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x[0]);
HYPRE_Int i, j, jstart, restk;
restk = (k - (k / 8 * 8));
if (k > 7)
{
for (j = 0; j < k - 7; j += 8)
{
jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[j] * x_data[jstart + i] + alpha[j + 1] * x_data[jstart + i + size]
+ alpha[j + 2] * x_data[(j + 2) * size + i] + alpha[j + 3] * x_data[(j + 3) * size + i]
+ alpha[j + 4] * x_data[(j + 4) * size + i] + alpha[j + 5] * x_data[(j + 5) * size + i]
+ alpha[j + 6] * x_data[(j + 6) * size + i] + alpha[j + 7] * x_data[(j + 7) * size + i];
}
}
}
if (restk == 1)
{
jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 1] * x_data[jstart + i];
}
}
else if (restk == 2)
{
jstart = (k - 2) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 2] * x_data[jstart + i] + alpha[k - 1] * x_data[jstart + size + i];
}
}
else if (restk == 3)
{
jstart = (k - 3) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 3] * x_data[jstart + i] + alpha[k - 2] * x_data[jstart + size + i]
+ alpha[k - 1] * x_data[(k - 1) * size + i];
}
}
else if (restk == 4)
{
jstart = (k - 4) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 4] * x_data[(k - 4) * size + i] + alpha[k - 3] * x_data[(k - 3) * size + i]
+ alpha[k - 2] * x_data[(k - 2) * size + i] + alpha[k - 1] * x_data[(k - 1) * size + i];
}
}
else if (restk == 5)
{
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 5] * x_data[(k - 5) * size + i] + alpha[k - 4] * x_data[(k - 4) * size + i]
+ alpha[k - 3] * x_data[(k - 3) * size + i] + alpha[k - 2] * x_data[(k - 2) * size + i]
+ alpha[k - 1] * x_data[(k - 1) * size + i];
}
}
else if (restk == 6)
{
jstart = (k - 6) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 6] * x_data[jstart + i] + alpha[k - 5] * x_data[jstart + i + size]
+ alpha[k - 4] * x_data[(k - 4) * size + i] + alpha[k - 3] * x_data[(k - 3) * size + i]
+ alpha[k - 2] * x_data[(k - 2) * size + i] + alpha[k - 1] * x_data[(k - 1) * size + i];
}
}
else if (restk == 7)
{
jstart = (k - 7) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 7] * x_data[jstart + i] + alpha[k - 6] * x_data[jstart + i + size]
+ alpha[k - 5] * x_data[(k - 5) * size + i] + alpha[k - 4] * x_data[(k - 4) * size + i]
+ alpha[k - 3] * x_data[(k - 3) * size + i] + alpha[k - 2] * x_data[(k - 2) * size + i]
+ alpha[k - 1] * x_data[(k - 1) * size + i];
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassAxpy4
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy4( HYPRE_Complex *alpha,
hypre_Vector **x,
hypre_Vector *y, HYPRE_Int k)
{
HYPRE_Complex *x_data = hypre_VectorData(x[0]);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x[0]);
HYPRE_Int i, j, jstart, restk;
restk = (k - (k / 4 * 4));
if (k > 3)
{
for (j = 0; j < k - 3; j += 4)
{
jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[j] * x_data[jstart + i] + alpha[j + 1] * x_data[jstart + i + size]
+ alpha[j + 2] * x_data[(j + 2) * size + i] + alpha[j + 3] * x_data[(j + 3) * size + i];
}
}
}
if (restk == 1)
{
jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 1] * x_data[jstart + i];
}
}
else if (restk == 2)
{
jstart = (k - 2) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 2] * x_data[jstart + i] + alpha[k - 1] * x_data[jstart + size + i];
}
}
else if (restk == 3)
{
jstart = (k - 3) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[k - 3] * x_data[jstart + i] + alpha[k - 2] * x_data[jstart + size + i]
+ alpha[k - 1] * x_data[(k - 1) * size + i];
}
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassAxpy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqVectorMassAxpy( HYPRE_Complex *alpha,
hypre_Vector **x,
hypre_Vector *y, HYPRE_Int k, HYPRE_Int unroll)
{
HYPRE_Complex *x_data = hypre_VectorData(x[0]);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int size = hypre_VectorSize(x[0]);
HYPRE_Int i, j, jstart;
if (unroll == 8)
{
hypre_SeqVectorMassAxpy8(alpha, x, y, k);
return hypre_error_flag;
}
else if (unroll == 4)
{
hypre_SeqVectorMassAxpy4(alpha, x, y, k);
return hypre_error_flag;
}
else
{
for (j = 0; j < k; j++)
{
jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
y_data[i] += alpha[j] * x_data[jstart + i];
}
}
}
return hypre_error_flag;
}
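/*--------------------------------------------------------------------------
 * Illustrative usage sketch (not part of hypre): hypre_SeqVectorMassAxpy
 * computes y := y + sum_{j<k} alpha[j]*x[j] in one fused pass over y.
 * Assumes the hypre_CTAlloc/hypre_TFree memory macros with a location
 * argument; unroll=8 selects the 8-way kernel above.
 *--------------------------------------------------------------------------*/
static void example_mass_axpy( hypre_Vector **x, hypre_Vector *y, HYPRE_Int k )
{
   HYPRE_Complex *alpha = hypre_CTAlloc(HYPRE_Complex, k, HYPRE_MEMORY_HOST);
   HYPRE_Int j;
   for (j = 0; j < k; j++)
   {
      alpha[j] = 1.0; /* e.g. accumulate the plain sum of the k vectors */
   }
   hypre_SeqVectorMassAxpy(alpha, x, y, k, 8);
   hypre_TFree(alpha, HYPRE_MEMORY_HOST);
}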
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassInnerProd8
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqVectorMassInnerProd8( hypre_Vector *x,
hypre_Vector **y, HYPRE_Int k, HYPRE_Real *result)
{
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y[0]);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int i, j, restk;
HYPRE_Real res1;
HYPRE_Real res2;
HYPRE_Real res3;
HYPRE_Real res4;
HYPRE_Real res5;
HYPRE_Real res6;
HYPRE_Real res7;
HYPRE_Real res8;
HYPRE_Int jstart;
HYPRE_Int jstart1;
HYPRE_Int jstart2;
HYPRE_Int jstart3;
HYPRE_Int jstart4;
HYPRE_Int jstart5;
HYPRE_Int jstart6;
HYPRE_Int jstart7;
restk = (k - (k / 8 * 8));
if (k > 7)
{
for (j = 0; j < k - 7; j += 8)
{
res1 = 0;
res2 = 0;
res3 = 0;
res4 = 0;
res5 = 0;
res6 = 0;
res7 = 0;
res8 = 0;
jstart = j * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
jstart5 = jstart4 + size;
jstart6 = jstart5 + size;
jstart7 = jstart6 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7,res8) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];
res8 += hypre_conj(y_data[jstart7 + i]) * x_data[i];
}
result[j] = res1;
result[j + 1] = res2;
result[j + 2] = res3;
result[j + 3] = res4;
result[j + 4] = res5;
result[j + 5] = res6;
result[j + 6] = res7;
result[j + 7] = res8;
}
}
if (restk == 1)
{
res1 = 0;
jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
}
result[k - 1] = res1;
}
else if (restk == 2)
{
res1 = 0;
res2 = 0;
jstart = (k - 2) * size;
jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
}
result[k - 2] = res1;
result[k - 1] = res2;
}
else if (restk == 3)
{
res1 = 0;
res2 = 0;
res3 = 0;
jstart = (k - 3) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
}
result[k - 3] = res1;
result[k - 2] = res2;
result[k - 1] = res3;
}
else if (restk == 4)
{
res1 = 0;
res2 = 0;
res3 = 0;
res4 = 0;
jstart = (k - 4) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
}
result[k - 4] = res1;
result[k - 3] = res2;
result[k - 2] = res3;
result[k - 1] = res4;
}
else if (restk == 5)
{
res1 = 0;
res2 = 0;
res3 = 0;
res4 = 0;
res5 = 0;
jstart = (k - 5) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
}
result[k - 5] = res1;
result[k - 4] = res2;
result[k - 3] = res3;
result[k - 2] = res4;
result[k - 1] = res5;
}
else if (restk == 6)
{
res1 = 0;
res2 = 0;
res3 = 0;
res4 = 0;
res5 = 0;
res6 = 0;
jstart = (k - 6) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
jstart5 = jstart4 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
}
result[k - 6] = res1;
result[k - 5] = res2;
result[k - 4] = res3;
result[k - 3] = res4;
result[k - 2] = res5;
result[k - 1] = res6;
}
else if (restk == 7)
{
res1 = 0;
res2 = 0;
res3 = 0;
res4 = 0;
res5 = 0;
res6 = 0;
res7 = 0;
jstart = (k - 7) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
jstart5 = jstart4 + size;
jstart6 = jstart5 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4,res5,res6,res7) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
res5 += hypre_conj(y_data[jstart4 + i]) * x_data[i];
res6 += hypre_conj(y_data[jstart5 + i]) * x_data[i];
res7 += hypre_conj(y_data[jstart6 + i]) * x_data[i];
}
result[k - 7] = res1;
result[k - 6] = res2;
result[k - 5] = res3;
result[k - 4] = res4;
result[k - 3] = res5;
result[k - 2] = res6;
result[k - 1] = res7;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassInnerProd4
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqVectorMassInnerProd4( hypre_Vector *x,
hypre_Vector **y, HYPRE_Int k, HYPRE_Real *result)
{
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y[0]);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int i, j, restk;
HYPRE_Real res1;
HYPRE_Real res2;
HYPRE_Real res3;
HYPRE_Real res4;
HYPRE_Int jstart;
HYPRE_Int jstart1;
HYPRE_Int jstart2;
HYPRE_Int jstart3;
restk = (k - (k / 4 * 4));
if (k > 3)
{
for (j = 0; j < k - 3; j += 4)
{
res1 = 0;
res2 = 0;
res3 = 0;
res4 = 0;
jstart = j * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3,res4) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
res4 += hypre_conj(y_data[jstart3 + i]) * x_data[i];
}
result[j] = res1;
result[j + 1] = res2;
result[j + 2] = res3;
result[j + 3] = res4;
}
}
if (restk == 1)
{
res1 = 0;
jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
}
result[k - 1] = res1;
}
else if (restk == 2)
{
res1 = 0;
res2 = 0;
jstart = (k - 2) * size;
jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
}
result[k - 2] = res1;
result[k - 1] = res2;
}
else if (restk == 3)
{
res1 = 0;
res2 = 0;
res3 = 0;
jstart = (k - 3) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res1,res2,res3) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res1 += hypre_conj(y_data[jstart + i]) * x_data[i];
res2 += hypre_conj(y_data[jstart1 + i]) * x_data[i];
res3 += hypre_conj(y_data[jstart2 + i]) * x_data[i];
}
result[k - 3] = res1;
result[k - 2] = res2;
result[k - 1] = res3;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassDotpTwo8
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqVectorMassDotpTwo8( hypre_Vector *x, hypre_Vector *y,
hypre_Vector **z, HYPRE_Int k, HYPRE_Real *result_x, HYPRE_Real *result_y)
{
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Complex *z_data = hypre_VectorData(z[0]);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int i, j, restk;
HYPRE_Real res_x1;
HYPRE_Real res_x2;
HYPRE_Real res_x3;
HYPRE_Real res_x4;
HYPRE_Real res_x5;
HYPRE_Real res_x6;
HYPRE_Real res_x7;
HYPRE_Real res_x8;
HYPRE_Real res_y1;
HYPRE_Real res_y2;
HYPRE_Real res_y3;
HYPRE_Real res_y4;
HYPRE_Real res_y5;
HYPRE_Real res_y6;
HYPRE_Real res_y7;
HYPRE_Real res_y8;
HYPRE_Int jstart;
HYPRE_Int jstart1;
HYPRE_Int jstart2;
HYPRE_Int jstart3;
HYPRE_Int jstart4;
HYPRE_Int jstart5;
HYPRE_Int jstart6;
HYPRE_Int jstart7;
restk = (k - (k / 8 * 8));
if (k > 7)
{
for (j = 0; j < k - 7; j += 8)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_x4 = 0;
res_x5 = 0;
res_x6 = 0;
res_x7 = 0;
res_x8 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
res_y4 = 0;
res_y5 = 0;
res_y6 = 0;
res_y7 = 0;
res_y8 = 0;
jstart = j * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
jstart5 = jstart4 + size;
jstart6 = jstart5 + size;
jstart7 = jstart6 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_x8,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7,res_y8) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];
res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];
res_x8 += hypre_conj(z_data[jstart7 + i]) * x_data[i];
res_y8 += hypre_conj(z_data[jstart7 + i]) * y_data[i];
}
result_x[j] = res_x1;
result_x[j + 1] = res_x2;
result_x[j + 2] = res_x3;
result_x[j + 3] = res_x4;
result_x[j + 4] = res_x5;
result_x[j + 5] = res_x6;
result_x[j + 6] = res_x7;
result_x[j + 7] = res_x8;
result_y[j] = res_y1;
result_y[j + 1] = res_y2;
result_y[j + 2] = res_y3;
result_y[j + 3] = res_y4;
result_y[j + 4] = res_y5;
result_y[j + 5] = res_y6;
result_y[j + 6] = res_y7;
result_y[j + 7] = res_y8;
}
}
if (restk == 1)
{
res_x1 = 0;
res_y1 = 0;
jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
}
result_x[k - 1] = res_x1;
result_y[k - 1] = res_y1;
}
else if (restk == 2)
{
res_x1 = 0;
res_x2 = 0;
res_y1 = 0;
res_y2 = 0;
jstart = (k - 2) * size;
jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
}
result_x[k - 2] = res_x1;
result_x[k - 1] = res_x2;
result_y[k - 2] = res_y1;
result_y[k - 1] = res_y2;
}
else if (restk == 3)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
jstart = (k - 3) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
}
result_x[k - 3] = res_x1;
result_x[k - 2] = res_x2;
result_x[k - 1] = res_x3;
result_y[k - 3] = res_y1;
result_y[k - 2] = res_y2;
result_y[k - 1] = res_y3;
}
else if (restk == 4)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_x4 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
res_y4 = 0;
jstart = (k - 4) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
}
result_x[k - 4] = res_x1;
result_x[k - 3] = res_x2;
result_x[k - 2] = res_x3;
result_x[k - 1] = res_x4;
result_y[k - 4] = res_y1;
result_y[k - 3] = res_y2;
result_y[k - 2] = res_y3;
result_y[k - 1] = res_y4;
}
else if (restk == 5)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_x4 = 0;
res_x5 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
res_y4 = 0;
res_y5 = 0;
jstart = (k - 5) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_y1,res_y2,res_y3,res_y4,res_y5) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
}
result_x[k - 5] = res_x1;
result_x[k - 4] = res_x2;
result_x[k - 3] = res_x3;
result_x[k - 2] = res_x4;
result_x[k - 1] = res_x5;
result_y[k - 5] = res_y1;
result_y[k - 4] = res_y2;
result_y[k - 3] = res_y3;
result_y[k - 2] = res_y4;
result_y[k - 1] = res_y5;
}
else if (restk == 6)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_x4 = 0;
res_x5 = 0;
res_x6 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
res_y4 = 0;
res_y5 = 0;
res_y6 = 0;
jstart = (k - 6) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
jstart5 = jstart4 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
}
result_x[k - 6] = res_x1;
result_x[k - 5] = res_x2;
result_x[k - 4] = res_x3;
result_x[k - 3] = res_x4;
result_x[k - 2] = res_x5;
result_x[k - 1] = res_x6;
result_y[k - 6] = res_y1;
result_y[k - 5] = res_y2;
result_y[k - 4] = res_y3;
result_y[k - 3] = res_y4;
result_y[k - 2] = res_y5;
result_y[k - 1] = res_y6;
}
else if (restk == 7)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_x4 = 0;
res_x5 = 0;
res_x6 = 0;
res_x7 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
res_y4 = 0;
res_y5 = 0;
res_y6 = 0;
res_y7 = 0;
jstart = (k - 7) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
jstart4 = jstart3 + size;
jstart5 = jstart4 + size;
jstart6 = jstart5 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_x5,res_x6,res_x7,res_y1,res_y2,res_y3,res_y4,res_y5,res_y6,res_y7) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
res_x5 += hypre_conj(z_data[jstart4 + i]) * x_data[i];
res_y5 += hypre_conj(z_data[jstart4 + i]) * y_data[i];
res_x6 += hypre_conj(z_data[jstart5 + i]) * x_data[i];
res_y6 += hypre_conj(z_data[jstart5 + i]) * y_data[i];
res_x7 += hypre_conj(z_data[jstart6 + i]) * x_data[i];
res_y7 += hypre_conj(z_data[jstart6 + i]) * y_data[i];
}
result_x[k - 7] = res_x1;
result_x[k - 6] = res_x2;
result_x[k - 5] = res_x3;
result_x[k - 4] = res_x4;
result_x[k - 3] = res_x5;
result_x[k - 2] = res_x6;
result_x[k - 1] = res_x7;
result_y[k - 7] = res_y1;
result_y[k - 6] = res_y2;
result_y[k - 5] = res_y3;
result_y[k - 4] = res_y4;
result_y[k - 3] = res_y5;
result_y[k - 2] = res_y6;
result_y[k - 1] = res_y7;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassDotpTwo4
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqVectorMassDotpTwo4( hypre_Vector *x, hypre_Vector *y,
hypre_Vector **z, HYPRE_Int k, HYPRE_Real *result_x, HYPRE_Real *result_y)
{
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Complex *z_data = hypre_VectorData(z[0]);
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int i, j, restk;
HYPRE_Real res_x1;
HYPRE_Real res_x2;
HYPRE_Real res_x3;
HYPRE_Real res_x4;
HYPRE_Real res_y1;
HYPRE_Real res_y2;
HYPRE_Real res_y3;
HYPRE_Real res_y4;
HYPRE_Int jstart;
HYPRE_Int jstart1;
HYPRE_Int jstart2;
HYPRE_Int jstart3;
restk = (k - (k / 4 * 4));
if (k > 3)
{
for (j = 0; j < k - 3; j += 4)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_x4 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
res_y4 = 0;
jstart = j * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
jstart3 = jstart2 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_x4,res_y1,res_y2,res_y3,res_y4) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
res_x4 += hypre_conj(z_data[jstart3 + i]) * x_data[i];
res_y4 += hypre_conj(z_data[jstart3 + i]) * y_data[i];
}
result_x[j] = res_x1;
result_x[j + 1] = res_x2;
result_x[j + 2] = res_x3;
result_x[j + 3] = res_x4;
result_y[j] = res_y1;
result_y[j + 1] = res_y2;
result_y[j + 2] = res_y3;
result_y[j + 3] = res_y4;
}
}
if (restk == 1)
{
res_x1 = 0;
res_y1 = 0;
jstart = (k - 1) * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_y1) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
}
result_x[k - 1] = res_x1;
result_y[k - 1] = res_y1;
}
else if (restk == 2)
{
res_x1 = 0;
res_x2 = 0;
res_y1 = 0;
res_y2 = 0;
jstart = (k - 2) * size;
jstart1 = jstart + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_y1,res_y2) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
}
result_x[k - 2] = res_x1;
result_x[k - 1] = res_x2;
result_y[k - 2] = res_y1;
result_y[k - 1] = res_y2;
}
else if (restk == 3)
{
res_x1 = 0;
res_x2 = 0;
res_x3 = 0;
res_y1 = 0;
res_y2 = 0;
res_y3 = 0;
jstart = (k - 3) * size;
jstart1 = jstart + size;
jstart2 = jstart1 + size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x1,res_x2,res_x3,res_y1,res_y2,res_y3) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x1 += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y1 += hypre_conj(z_data[jstart + i]) * y_data[i];
res_x2 += hypre_conj(z_data[jstart1 + i]) * x_data[i];
res_y2 += hypre_conj(z_data[jstart1 + i]) * y_data[i];
res_x3 += hypre_conj(z_data[jstart2 + i]) * x_data[i];
res_y3 += hypre_conj(z_data[jstart2 + i]) * y_data[i];
}
result_x[k - 3] = res_x1;
result_x[k - 2] = res_x2;
result_x[k - 1] = res_x3;
result_y[k - 3] = res_y1;
result_y[k - 2] = res_y2;
result_y[k - 1] = res_y3;
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_SeqVectorMassInnerProd
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqVectorMassInnerProd( hypre_Vector *x,
hypre_Vector **y, HYPRE_Int k, HYPRE_Int unroll, HYPRE_Real *result)
{
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y[0]);
HYPRE_Real res;
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int i, j, jstart;
if (unroll == 8)
{
hypre_SeqVectorMassInnerProd8(x, y, k, result);
return hypre_error_flag;
}
else if (unroll == 4)
{
hypre_SeqVectorMassInnerProd4(x, y, k, result);
return hypre_error_flag;
}
else
{
for (j = 0; j < k; j++)
{
res = 0;
jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res += hypre_conj(y_data[jstart + i]) * x_data[i];
}
result[j] = res;
}
}
return hypre_error_flag;
}
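/*--------------------------------------------------------------------------
 * Illustrative usage sketch (not part of hypre): one classical Gram-Schmidt
 * step built from the two mass kernels. h[j] = <V[j],w> is formed in a single
 * sweep, then w := w - sum_j h[j]*V[j] in another; this is the fusion pattern
 * these kernels are built around. Assumes hypre_CTAlloc/hypre_TFree as above.
 *--------------------------------------------------------------------------*/
static void example_cgs_step( hypre_Vector *w, hypre_Vector **V,
                              HYPRE_Int k, HYPRE_Real *h )
{
   HYPRE_Complex *minus_h = hypre_CTAlloc(HYPRE_Complex, k, HYPRE_MEMORY_HOST);
   HYPRE_Int j;
   hypre_SeqVectorMassInnerProd(w, V, k, 8, h);   /* h[j] = conj(V[j]).w */
   for (j = 0; j < k; j++)
   {
      minus_h[j] = -h[j];
   }
   hypre_SeqVectorMassAxpy(minus_h, V, w, k, 8);  /* w -= sum_j h[j]*V[j] */
   hypre_TFree(minus_h, HYPRE_MEMORY_HOST);
}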
/*--------------------------------------------------------------------------
* hypre_SeqVectorMassDotpTwo
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqVectorMassDotpTwo( hypre_Vector *x, hypre_Vector *y,
hypre_Vector **z, HYPRE_Int k, HYPRE_Int unroll,
HYPRE_Real *result_x, HYPRE_Real *result_y)
{
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Complex *z_data = hypre_VectorData(z[0]);
HYPRE_Real res_x, res_y;
HYPRE_Int size = hypre_VectorSize(x);
HYPRE_Int i, j, jstart;
if (unroll == 8)
{
hypre_SeqVectorMassDotpTwo8(x, y, z, k, result_x, result_y);
return hypre_error_flag;
}
else if (unroll == 4)
{
hypre_SeqVectorMassDotpTwo4(x, y, z, k, result_x, result_y);
return hypre_error_flag;
}
else
{
for (j = 0; j < k; j++)
{
res_x = 0; //result_x[j];
res_y = 0; //result_y[j];
jstart = j * size;
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) reduction(+:res_x,res_y) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < size; i++)
{
res_x += hypre_conj(z_data[jstart + i]) * x_data[i];
res_y += hypre_conj(z_data[jstart + i]) * y_data[i];
}
result_x[j] = res_x;
result_y[j] = res_y;
}
}
return hypre_error_flag;
}
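/*--------------------------------------------------------------------------
 * Illustrative usage sketch (not part of hypre): the fused two-vector variant
 * reads each z[j] once while producing both hx[j] = <z[j],x> and
 * hy[j] = <z[j],y>, roughly halving memory traffic versus two separate
 * MassInnerProd calls.
 *--------------------------------------------------------------------------*/
static void example_mass_dotp_two( hypre_Vector *x, hypre_Vector *y,
                                   hypre_Vector **z, HYPRE_Int k,
                                   HYPRE_Real *hx, HYPRE_Real *hy )
{
   hypre_SeqVectorMassDotpTwo(x, y, z, k, 8, hx, hy);
}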
|
GB_unop__identity_fp32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_int32)
// op(A') function: GB (_unop_tran__identity_fp32_int32)
// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp32_int32)
(
float *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
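//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): the dense (non-bitmap) apply above
// is equivalent to this scalar loop -- cast each int32 entry to float, then
// apply the identity operator.
//------------------------------------------------------------------------------
static inline void example_unop_identity_fp32_int32
(
    float *Cx,
    const int32_t *Ax,
    int64_t anz
)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (float) Ax [p] ;   // GB_CAST followed by GB_OP (identity)
    }
}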
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
misc.c | /*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
//#define OMP_GPU_OFFLOAD
void zero_vector(level_type * level, int id_a){
// zeros the entire grid INCLUDING ghost zones...
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
double * __restrict__ vector_grid = vector_base;
int index_grid = id_a * num_my_boxes*box_volume;
int size = level->num_my_boxes * level->box_volume;
// note: OpenMP array sections are [start : length], so map 'size' elements
// of the target vector beginning at index_grid
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(to:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
map(from:vector_grid[index_grid:size]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
int ilo = my_blocks[block].read.i;
int jlo = my_blocks[block].read.j;
int klo = my_blocks[block].read.k;
int ihi = my_blocks[block].dim.i + ilo;
int jhi = my_blocks[block].dim.j + jlo;
int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
const int dim = my_boxes[box].dim;
// expand the size of the block to include the ghost zones...
if(ilo<= 0)ilo-=ghosts;
if(jlo<= 0)jlo-=ghosts;
if(klo<= 0)klo-=ghosts;
if(ihi>=dim)ihi+=ghosts;
if(jhi>=dim)jhi+=ghosts;
if(khi>=dim)khi+=ghosts;
double * __restrict__ grid = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid[ijk] = 0.0;
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
void init_vector(level_type * level, int id_a, double scalar){
// initializes the grid to a scalar while zeroing the ghost zones...
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
int ilo = my_blocks[block].read.i;
int jlo = my_blocks[block].read.j;
int klo = my_blocks[block].read.k;
int ihi = my_blocks[block].dim.i + ilo;
int jhi = my_blocks[block].dim.j + jlo;
int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
const int dim = my_boxes[box].dim;
// expand the size of the block to include the ghost zones...
if(ilo<= 0)ilo-=ghosts;
if(jlo<= 0)jlo-=ghosts;
if(klo<= 0)klo-=ghosts;
if(ihi>=dim)ihi+=ghosts;
if(jhi>=dim)jhi+=ghosts;
if(khi>=dim)khi+=ghosts;
double * __restrict__ grid = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
int ghostZone = (i<0) || (j<0) || (k<0) || (i>=dim) || (j>=dim) || (k>=dim);
grid[ijk] = ghostZone ? 0.0 : scalar;
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// add vectors id_a (scaled by scale_a) and id_b (scaled by scale_b) and store the result in vector id_c
// i.e. c[] = scale_a*a[] + scale_b*b[]
// note, only non ghost zone values are included in this calculation
void add_vectors(level_type * level, int id_c, double scale_a, int id_a, double scale_b, int id_b){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a, id_b, id_c, scale_a, scale_b)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_b = &vector_base[id_b*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid_c[ijk] = scale_a*grid_a[ijk] + scale_b*grid_b[ijk];
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
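//------------------------------------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): a residual-style update is
// a single add_vectors call; the vector ids are whatever the application assigned.
static void example_residual(level_type * level, int id_r, int id_b, int id_Ax){
  add_vectors(level, id_r, 1.0, id_b, -1.0, id_Ax); // r = b - Ax (ghost zones excluded)
}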
//------------------------------------------------------------------------------------------------------------------------------
// multiply each element of vector id_a by vector id_b and scale, and place the result in vector id_c
// i.e. c[]=scale*a[]*b[]
// note, only non ghost zone values are included in this calculation
void mul_vectors(level_type * level, int id_c, double scale, int id_a, int id_b){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a, id_b, id_c, scale)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_b = &vector_base[id_b*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid_c[ijk] = scale*grid_a[ijk]*grid_b[ijk];
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// invert each element of vector id_a, scale by scale_a, and place the result in vector id_c
// i.e. c[]=scale_a/a[]
// note, only non ghost zone values are included in this calculation
void invert_vector(level_type * level, int id_c, double scale_a, int id_a){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a, id_c, scale_a)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid_c[ijk] = scale_a/grid_a[ijk];
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// scale vector id_a by scale_a and place the result in vector id_c
// i.e. c[]=scale_a*a[]
// note, only non ghost zone values are included in this calculation
void scale_vector(level_type * level, int id_c, double scale_a, int id_a){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
  double * __restrict__ vector_grid_c = vector_base;
  int index_grid_c = id_c * num_my_boxes*box_volume;
  int size = level->num_my_boxes * level->box_volume;
  // note: OpenMP array sections are [start : length], so map 'size' output
  // elements beginning at index_grid_c
  #pragma omp target \
  map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
  map(to:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
  map(from:vector_grid_c[index_grid_c:size]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a, id_c, scale_a)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_c = &vector_grid_c[id_c*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid_c[ijk] = scale_a*grid_a[ijk];
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// return the dot product of vectors id_a and id_b
// note, only non ghost zone values are included in this calculation
double dot(level_type * level, int id_a, int id_b){
double _timeStart = getTime();
int block;
double a_dot_b_level = 0.0;
// TODO: Fix this
//PRAGMA_THREAD_ACROSS_BLOCKS_SUM(level,block,level->num_my_blocks,a_dot_b_level)
for(block=0;block<level->num_my_blocks;block++){
const int box = level->my_blocks[block].read.box;
const int ilo = level->my_blocks[block].read.i;
const int jlo = level->my_blocks[block].read.j;
const int klo = level->my_blocks[block].read.k;
const int ihi = level->my_blocks[block].dim.i + ilo;
const int jhi = level->my_blocks[block].dim.j + jlo;
const int khi = level->my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int ghosts = level->my_boxes[box].ghosts;
double * __restrict__ grid_a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
double * __restrict__ grid_b = level->my_boxes[box].vectors[id_b] + ghosts*(1+jStride+kStride);
double a_dot_b_block = 0.0;
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
a_dot_b_block += grid_a[ijk]*grid_b[ijk];
}}}
a_dot_b_level+=a_dot_b_block;
}
level->timers.blas1 += (double)(getTime()-_timeStart);
#ifdef USE_MPI
double _timeStartAllReduce = getTime();
double send = a_dot_b_level;
MPI_Allreduce(&send,&a_dot_b_level,1,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
double _timeEndAllReduce = getTime();
level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce);
#endif
return(a_dot_b_level);
}
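//------------------------------------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): the squared L2 norm is a
// self dot product; dot() already performs the MPI_Allreduce, so the value is
// globally consistent across ranks.
static double example_norm2_squared(level_type * level, int id_a){
  return dot(level, id_a, id_a);
}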
//------------------------------------------------------------------------------------------------------------------------------
// return the max (infinity) norm of the vector id_a.
// note, only non ghost zone values are included in this calculation
double norm(level_type * level, int id_a){ // implements the max norm
double _timeStart = getTime();
int block;
double max_norm = 0.0;
// TODO: Fix this
//PRAGMA_THREAD_ACROSS_BLOCKS_MAX(level,block,level->num_my_blocks,max_norm)
for(block=0;block<level->num_my_blocks;block++){
const int box = level->my_blocks[block].read.box;
const int ilo = level->my_blocks[block].read.i;
const int jlo = level->my_blocks[block].read.j;
const int klo = level->my_blocks[block].read.k;
const int ihi = level->my_blocks[block].dim.i + ilo;
const int jhi = level->my_blocks[block].dim.j + jlo;
const int khi = level->my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int ghosts = level->my_boxes[box].ghosts;
double * __restrict__ grid = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
double block_norm = 0.0;
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
double fabs_grid_ijk = fabs(grid[ijk]);
if(fabs_grid_ijk>block_norm){block_norm=fabs_grid_ijk;} // max norm
}}}
if(block_norm>max_norm){max_norm = block_norm;}
} // block list
level->timers.blas1 += (double)(getTime()-_timeStart);
#ifdef USE_MPI
double _timeStartAllReduce = getTime();
double send = max_norm;
MPI_Allreduce(&send,&max_norm,1,MPI_DOUBLE,MPI_MAX,level->MPI_COMM_ALLREDUCE);
double _timeEndAllReduce = getTime();
level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce);
#endif
return(max_norm);
}
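//------------------------------------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): a typical relative
// convergence test on a residual vector, using the global max norm.
static int example_converged(level_type * level, int id_r, int id_b, double tol){
  return ( norm(level, id_r) < tol * norm(level, id_b) );
}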
//------------------------------------------------------------------------------------------------------------------------------
// return the mean (arithmetic average value) of vector id_a
// essentially, this is an L1 norm scaled by the inverse of the total (global) number of cells
// note, only non ghost zone values are included in this calculation
double mean(level_type * level, int id_a){
double _timeStart = getTime();
int block;
double sum_level = 0.0;
// TODO: Add target pragma once reduction feature is implemented
PRAGMA_THREAD_ACROSS_BLOCKS_SUM(level,block,level->num_my_blocks,sum_level)
for(block=0;block<level->num_my_blocks;block++){
const int box = level->my_blocks[block].read.box;
const int ilo = level->my_blocks[block].read.i;
const int jlo = level->my_blocks[block].read.j;
const int klo = level->my_blocks[block].read.k;
const int ihi = level->my_blocks[block].dim.i + ilo;
const int jhi = level->my_blocks[block].dim.j + jlo;
const int khi = level->my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = level->my_boxes[box].jStride;
const int kStride = level->my_boxes[box].kStride;
const int ghosts = level->my_boxes[box].ghosts;
double * __restrict__ grid_a = level->my_boxes[box].vectors[id_a] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
double sum_block = 0.0;
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
sum_block += grid_a[ijk];
}}}
sum_level+=sum_block;
}
level->timers.blas1 += (double)(getTime()-_timeStart);
double ncells_level = (double)level->dim.i*(double)level->dim.j*(double)level->dim.k;
#ifdef USE_MPI
double _timeStartAllReduce = getTime();
double send = sum_level;
MPI_Allreduce(&send,&sum_level,1,MPI_DOUBLE,MPI_SUM,level->MPI_COMM_ALLREDUCE);
double _timeEndAllReduce = getTime();
level->timers.collectives += (double)(_timeEndAllReduce-_timeStartAllReduce);
#endif
double mean_level = sum_level / ncells_level;
return(mean_level);
}
//------------------------------------------------------------------------------------------------------------------------------
// add the scalar value shift_a to each element of vector id_a and store the result in vector id_c
// note, only non ghost zone values are included in this calculation
void shift_vector(level_type * level, int id_c, int id_a, double shift_a){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a, id_c, shift_a)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
double * __restrict__ grid_a = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
double * __restrict__ grid_c = &vector_base[id_c*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid_c[ijk] = grid_a[ijk] + shift_a;
}}}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
// calculate the error between two vectors (id_a and id_b) using either the max (infinity) norm or the L2 norm
// note, only non ghost zone values are included in this calculation
double error(level_type * level, int id_a, int id_b){
double h3 = level->h * level->h * level->h;
add_vectors(level,VECTOR_TEMP,1.0,id_a,-1.0,id_b); // VECTOR_TEMP = id_a - id_b
  double max = norm(level,VECTOR_TEMP); return(max);        // max norm of the error (active branch)
  double L2 = sqrt( dot(level,VECTOR_TEMP,VECTOR_TEMP)*h3); // normalized L2 error; dead code unless
  return( L2);                                              // the max-norm return above is removed
}
//------------------------------------------------------------------------------------------------------------------------------
// Color the vector id_a with 1's and 0's
// The pattern is dictated by the number of colors in each dimension and the 'active' color (i,j,kcolor)
// note, only non ghost zone values are included in this calculation
// e.g. colors_in_each_dim=3, icolor=1, jcolor=2...
// -+---+---+---+-
// | 0 | 1 | 0 |
// -+---+---+---+-
// | 0 | 0 | 0 |
// -+---+---+---+-
// | 0 | 0 | 0 |
// -+---+---+---+-
//
void color_vector(level_type * level, int id_a, int colors_in_each_dim, int icolor, int jcolor, int kcolor){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
double * __restrict__ vector_grid = vector_base;
int index_grid = id_a * num_my_boxes*box_volume;
int size = level->num_my_boxes * level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_grid[index_grid:size]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a, colors_in_each_dim, \
icolor, jcolor, kcolor)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
const int boxlowi = my_boxes[box].low.i;
const int boxlowj = my_boxes[box].low.j;
const int boxlowk = my_boxes[box].low.k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
int i,j,k;
double * __restrict__ grid = &vector_grid[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
double sk=0.0;
if( ((k+boxlowk+kcolor)%colors_in_each_dim) == 0 )
sk=1.0; // if colors_in_each_dim==1 (don't color), all cells are set to 1.0
for(j=jlo;j<jhi;j++){
double sj=0.0;
if( ((j+boxlowj+jcolor)%colors_in_each_dim) == 0 )
sj=1.0;
for(i=ilo;i<ihi;i++){
double si=0.0;
if( ((i+boxlowi+icolor)%colors_in_each_dim) == 0 )
si=1.0;
int ijk = i + j*jStride + k*kStride;
grid[ijk] = si*sj*sk;
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
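//------------------------------------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): with colors_in_each_dim=2
// the cells split into 2^3 = 8 colors indexed by (icolor,jcolor,kcolor); each
// call below writes a 0/1 mask selecting one color, e.g. for a colored smoother.
static void example_eight_color_sweep(level_type * level, int id_mask){
  int ic,jc,kc;
  for(kc=0;kc<2;kc++){
  for(jc=0;jc<2;jc++){
  for(ic=0;ic<2;ic++){
    color_vector(level, id_mask, 2, ic, jc, kc);
    // ... apply one masked smoother sweep here ...
  }}}
}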
//------------------------------------------------------------------------------------------------------------------------------
// Initialize each element of vector id_a with a "random" value.
// For simplicity, random is defined as -1.0 or +1.0 and is based on whether the coordinates of the element are even or odd
// note, only non ghost zone values are included in this calculation
void random_vector(level_type * level, int id_a){
double _timeStart = getTime();
int block;
blockCopy_type * my_blocks = level->my_blocks;
box_type * my_boxes = level->my_boxes;
double * vector_base = level->vectors[0];
int num_my_blocks = level->num_my_blocks;
int num_my_boxes = level->num_my_boxes;
int num_vectors = level->numVectors;
int box_volume = level->box_volume;
#pragma omp target \
map(to:my_blocks[0:num_my_blocks], my_boxes[0:num_my_boxes]) \
map(tofrom:vector_base[0:(num_vectors*num_my_boxes*box_volume)]) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
#pragma omp teams distribute \
firstprivate(num_my_blocks, num_my_boxes, num_vectors, \
box_volume, id_a)
for(block=0;block<num_my_blocks;block++){
const int box = my_blocks[block].read.box;
const int ilo = my_blocks[block].read.i;
const int jlo = my_blocks[block].read.j;
const int klo = my_blocks[block].read.k;
const int ihi = my_blocks[block].dim.i + ilo;
const int jhi = my_blocks[block].dim.j + jlo;
const int khi = my_blocks[block].dim.k + klo;
int i,j,k;
const int jStride = my_boxes[box].jStride;
const int kStride = my_boxes[box].kStride;
const int ghosts = my_boxes[box].ghosts;
double * __restrict__ grid = &vector_base[id_a*num_my_boxes*box_volume +
box*box_volume +
ghosts*(1+jStride+kStride)];
#pragma omp parallel for collapse(3) \
if(num_my_blocks >= GPU_THRESHOLD && GPU_OFFLOAD_ENABLE)
for(k=klo;k<khi;k++){
for(j=jlo;j<jhi;j++){
for(i=ilo;i<ihi;i++){
int ijk = i + j*jStride + k*kStride;
grid[ijk] = -1.000 + 2.0*((i^j^k^0x1)&0x1); // keep only the low bit so the value is exactly -1.0 or +1.0
}
}
}
}
level->timers.blas1 += (double)(getTime()-_timeStart);
}
//------------------------------------------------------------------------------------------------------------------------------
|
morn_list.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_ptc.h"
struct HandleListCreate
{
MList *list;
MChain *property;
int64_t reserve[8];
int writeable;
int num;
void **data;
MMemory *memory;
int defrag_size;
int read_order;
};
void endListCreate(struct HandleListCreate *handle)
{
mException((handle->list == NULL),EXIT,"invalid list");
if(handle->property!=NULL) mChainRelease(handle->property);
if(handle->memory !=NULL) mMemoryRelease(handle->memory);
if(handle->data != NULL) mFree(handle->data);
memset(handle->list,0,sizeof(MList));
mFree(((MList **)(handle->list))-1);
}
#define HASH_ListCreate 0xfa6c59f
MList *ListCreate(int num,void **data)
{
MList **phandle = (MList **)mMalloc(sizeof(MList *)+sizeof(MList));
MList *list = (MList *)(phandle+1);
memset(list,0,sizeof(MList));
*phandle=mHandleCreate();
MHandle *hdl=mHandle(list,ListCreate);
struct HandleListCreate *handle = (struct HandleListCreate *)(hdl->handle);
handle->list = list;
if(num<0) num = 0;
handle->num = num;
list->num = num;
if(num>0)
{
handle->data = (void **)mMalloc(num*sizeof(void *));
if(!INVALID_POINTER(data)) memcpy(handle->data,data,num*sizeof(void *));
else memset(handle->data, 0,num*sizeof(void *));
}
else
mException((!INVALID_POINTER(data)),EXIT,"invalid input");
mPropertyFunction(list,"device",mornMemoryDevice,NULL);
list->data = handle->data;
return list;
}
void mListRelease(MList *list)
{
mHandleRelease(list);
}
void m_ListAppend(MList *list,void **data,int n)
{
mException(INVALID_POINTER(list),EXIT,"invalid input source list");
if(n<0) n=list->num+1;
else mException(n<list->num,EXIT,"invalid list append number");
struct HandleListCreate *handle= (struct HandleListCreate *)(ObjHandle(list,0)->handle);
if(n<=handle->num)
{
if((list->data!= handle->data)&&(list->num>0))
memcpy(handle->data,list->data,list->num*sizeof(void *));
        if(data!=NULL) memcpy(handle->data+list->num,data,(n-list->num)*sizeof(void *)); /* append after the existing elements, as in the grow path below */
list->data = handle->data;
list->num = n;
return;
}
// printf("aaaaaaaaaaaaaa\n");
int num = list->num + MAX(MAX(128,n-list->num),(list->num)>>1);
void **list_data = (void **)mMalloc(num*sizeof(void *));
if(list->num>0)
memcpy(list_data,list->data,(list->num)*sizeof(void *));
memset(list_data+list->num,0,(num-list->num)*sizeof(void *));
if(data!=NULL) memcpy(list_data+list->num,data,(n-list->num)*sizeof(void *));
if(handle->data != NULL) mFree(handle->data);
handle->data = list_data;
handle->num = num;
list->data = handle->data;
list->num = n;
}
void mListPlace(MList *list,void *data,int num,int size)
{
if(num<=0) return;
mException((size<=0),EXIT,"invalid input list element size");
int list_num = list->num;
mListAppend(list,list_num+num);
struct HandleListCreate *handle = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
void **idx = list->data+list_num;
if(handle->memory == NULL) handle->memory = mMemoryCreate(1,size*num,MORN_HOST);
else mMemoryAppend(handle->memory,size*num);
mMemoryIndex(handle->memory,num,size,&idx,1);
// printf("list_num=%d\n",list_num);
// printf("idx0=%p,list->data[0]=%p\n",idx[0],list->data[0]);
if(data==NULL) return;
char *p=(char *)data;
for(int i=0;i<num;i++) {memcpy(list->data[list_num+i],p,size);p+=size;}
}
// void mListOperate(MList *list,void (*func)(void *,void *),void *para)
// {
// for(int i=0;i<list->num;i++) func(list->data[i],para);
// }
// struct HandleListWrite
// {
// int defrag_size;
// };
// void endListWrite(void *info) {}
// #define HASH_ListWrite 0x40aea976
void *mListWrite(MList *list,int n,void *data,int size)
{
mException(INVALID_POINTER(list),EXIT,"invalid input source list");
    mException((n>list->num),EXIT,"invalid write location %d (list->num is %d)",n,list->num);
if(size<0)
{
mException((INVALID_POINTER(data)),EXIT,"invalid data to write,which is %p",data);
size = strlen((char *)data)+1;
}
struct HandleListCreate *handle0 = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
if(n<0) n = list->num;
if(handle0->memory == NULL) handle0->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
void *ptr = mMemoryWrite(handle0->memory,data,size);
int flag = (n==list->num); if(!flag) flag=(list->data[n]==NULL);
if(flag)
{
if(n<handle0->num) list->num = n+1;
else mListAppend(list,DFLT);
list->data[n] = ptr;
}
else
{
list->data[n] = ptr;
handle0->defrag_size += size;
if(handle0->defrag_size>16384)
{
mListElementOperate(list,MemoryCollect,handle0->memory);
MemoryDefrag(handle0->memory);
handle0->defrag_size=0;
}
}
return list->data[n];
}
// struct HandleListRead
// {
// int read_order;
// };
// void endListRead(void *info) {}
// #define HASH_ListRead 0x537cc305
void *mListRead(MList *list,int n,void *data,int size)
{
mException(INVALID_POINTER(list),EXIT,"invalid input");
struct HandleListCreate *handle0 = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
// MHandle *hdl=mHandle(list,ListRead);
// struct HandleListRead *handle = (struct HandleListRead *)(hdl->handle);
// if(hdl->valid == 0) handle->read_order = -1;
// hdl->valid = 1;
if(n<0) n = handle0->read_order;
handle0->read_order = n+1;
if(n>=list->num) return NULL;
if(data!=NULL)
{
if(size>0) memcpy( data, list->data[n],size);
else strcpy((char *)data,(char *)list->data[n]);
}
return list->data[n];
}
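// A minimal usage sketch for the write/read pair above (illustrative only,
// hence #if 0). It assumes the ListCreate constructor defined in this file is
// reachable by callers (Morn may expose it through a wrapper macro) and that
// the handle's read cursor starts at 0.
#if 0
void list_write_read_demo(void)
{
    MList *list = ListCreate(0,NULL);           // empty list
    mListWrite(list,DFLT,"alpha",-1);           // size<0: strlen()+1 bytes are stored
    mListWrite(list,DFLT,"beta" ,-1);           // n=DFLT appends at the end
    char *s;
    while((s=(char *)mListRead(list,DFLT,NULL,0))!=NULL)
        printf("%s\n",s);                       // prints alpha, then beta
    mListRelease(list);
}
#endif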
void mListClear(MList *list)
{
list->num=0;
struct HandleListCreate *handle0 = (struct HandleListCreate *)(ObjHandle(list,0)->handle);
if(handle0->memory!=NULL) mMemoryClear(handle0->memory);
}
void mListReorder(MList *list)
{
mException(INVALID_POINTER(list),EXIT,"invalid input source list");
void **data = list->data;
int list_num = list->num;
void *buff;
int i;
for(i=0;i<list_num;i++)
{
int j = mRand(0,list_num);
buff = data[i]; data[i] = data[j]; data[j] = buff;
}
}
void mListCopy(MList *src,MList *dst)
{
mListAppend(dst,src->num);
struct HandleListCreate *src_handle = (struct HandleListCreate *)(ObjHandle(src,0)->handle);
if(src_handle->memory == NULL)
{
memcpy(dst->data,src->data,src->num*sizeof(void *));
return;
}
struct HandleListCreate *dst_handle = (struct HandleListCreate *)(ObjHandle(dst,0)->handle);
if(dst_handle->memory == NULL)
dst_handle->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
    mMemoryCopy(src_handle->memory,&(src->data),dst_handle->memory,&(dst->data),1,&(src->num)); /* destination takes dst's element pointers */
}
void mListMerge(MList *list1,MList *list2,MList *dst)
{
if(list1->num+list2->num==0) {mListClear(dst); return;}
mListAppend(dst,list1->num+list2->num);
struct HandleListCreate *handle1 =(struct HandleListCreate *)(ObjHandle(list1,0)->handle);
struct HandleListCreate *handle2 =(struct HandleListCreate *)(ObjHandle(list2,0)->handle);
struct HandleListCreate *dst_handle=(struct HandleListCreate *)(ObjHandle( dst,0)->handle);
int num1 = list1->num;
int num2 = list2->num;
if(dst==list1)
{
if(num2>0)
{
memcpy(dst->data+num1,list2->data,num2*sizeof(void *));
mFree(list2->data);list2->data = NULL;list2->num = 0;
}
}
else if(dst==list2)
{
if(num1>0)
{
memcpy(dst->data+num2,list1->data,num1*sizeof(void *));
mFree(list1->data);list1->data = NULL;list1->num = 0;
}
}
else
{
if(num1>0)
{
memcpy(dst->data ,list1->data,num1*sizeof(void *));
mFree(list1->data);list1->data = NULL;list1->num = 0;
}
if(num2>0)
{
memcpy(dst->data+num1,list2->data,num2*sizeof(void *));
mFree(list2->data);list2->data = NULL;list2->num = 0;
}
}
if(dst_handle->memory==NULL) dst_handle->memory = mMemoryCreate(DFLT,DFLT,MORN_HOST);
else mMemoryRedefine(dst_handle->memory,num1+num2,DFLT,DFLT);
mMemoryMerge(handle1->memory,handle2->memory,dst_handle->memory);
mMemoryRelease(handle1->memory);handle1->memory = NULL;
mMemoryRelease(handle2->memory);handle2->memory = NULL;
}
void mListElementDelete(MList *list,int n)
{
mException(INVALID_POINTER(list),EXIT,"invalid input");
mException((n>=list->num),EXIT,"invalid input");
memmove(list->data+n,list->data+n+1,(list->num-n-1)*sizeof(void *));
list->num-=1;
}
void *mListElementInsert(MList *list,int n,void *data,int size)
{
mListWrite(list,DFLT,data,size);
void *buff = list->data[list->num-1];
memmove(list->data+n+1,list->data+n,(list->num-n-1)*sizeof(void *));
list->data[n] = buff;
return buff;
}
void mListElementOperate(MList *list,void *function,void *para)
{
void (*func)(void *,void *) = function;
mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
int i;
// #pragma omp parallel for
for(i=0;i<list->num;i++)
func(list->data[i],para);
}
void mListElementScreen(MList *list,void *function,void *para)
{
int (*func)(void *,void *) = function;
mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
int n=0;
for(int i=0;i<list->num;i++)
{
if(func(list->data[i],para))
{
list->data[n] = list->data[i];
n=n+1;
}
}
list->num = n;
}
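// A sketch of a filter predicate for mListElementScreen above (illustrative
// only, hence #if 0). The int element type and the threshold parameter are
// demo assumptions; any predicate returning nonzero keeps the element.
#if 0
static int keep_if_ge(void *elem,void *para)
{
    return (*(int *)elem) >= (*(int *)para);    // nonzero: keep this element
}
// usage: int thresh=10; mListElementScreen(list,keep_if_ge,&thresh);
#endif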
void mListElementSelect(MList *list,void *function,void *para)
{
void (*func)(void *,void *,int *,int *,void *) = function;
mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
int n=0;
for(int i=0;i<list->num;i++)
{
if(list->data[i]==NULL)
continue;
int flag1=1;
for(int j=i+1;j<list->num;j++)
{
if(list->data[j] == NULL)
continue;
int flag2=1;
func(list->data[i],list->data[j],&flag1,&flag2,para);
if(flag2==0)
list->data[j]=NULL;
if(flag1==0)
break;
}
if(flag1==1)
{
list->data[n]=list->data[i];
n=n+1;
}
}
list->num = n;
}
/*
void mListSelect(MList *list,void (*func)(void *,void *,int *,int *,void *),void *para)
{
mException(INVALID_POINTER(list)||(func==NULL),EXIT,"invalid input");
void **data = list->data;
int *flag=mMalloc((list->num+2)*sizeof(int));
flag=flag+1;
memset(flag,DFLT,list->num*sizeof(int));
flag[-1]=list->num; flag[list->num]=-1;
int flag1,flag2;
while(1)
{
int ok=1;
for(int i=flag[i];i<list->num;i++)
{
if(flag[i]<0) continue;
for(int j=flag[i]+1;j<list->num;j++)
{
if(j==i) continue;
if((flag[j]>=0)&&(flag[j]<list->num)) continue;
func(data[i],data[j],&flag1,&flag2,para);
if(flag1==0) {flag[i] = j;ok=0;break;}
if(flag2==0) {flag[j] = i;ok=0;continue;}
}
if(flag[i]>=0) continue;
flag[i]=list->num;
}
if(ok) break;
}
int n=0;
for(int i=0;i<list->num;i++) if(flag[i]==list->num) {list->data[n]=data[i];n++;}
list->num = n;
mFree(flag-1);
}
*/
int mListCluster(MList *list,int *group,void *function,void *para)
{
int (*func)(void *,void *,void *) = function;
mException((INVALID_POINTER(list))||(group==NULL)||(func==NULL),EXIT,"invalid input");
char *valid = (char *)mMalloc(list->num * sizeof(char));
memset(valid,0 ,list->num*sizeof(char));
memset(group,DFLT,list->num*sizeof(int));
int i,j,k;
int n=0;
for(i=0;i<list->num;i++)
{
for(j=0;j<i;j++)
{
if(group[i]==group[j]) continue;
        if(func(list->data[i],list->data[j],para)==1)// same cluster
{
if(group[i] == DFLT)
group[i] = group[j];
else
{
valid[group[j]] = 0;
int g = group[j];
for(k=0;k<i;k++)
if(group[k] == g) group[k] = group[i];
}
}
}
if(group[i] == DFLT)
{
group[i] = n;
valid[n] = 1;
n = n+1;
}
}
int *c = (int *)mMalloc(n *sizeof(int));
int num = 0;
for(i=0;i<n;i++)
{
if(valid[i] != 0)
{c[i] = num;num +=1;}
}
mFree(valid);
for(i=0;i<list->num;i++)
group[i] = c[group[i]];
mFree(c);
return num;
}
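// A sketch of a "same cluster" callback for mListCluster above (illustrative
// only, hence #if 0): it must return 1 when two elements belong to the same
// group. Integer elements and the distance bound are demo assumptions.
#if 0
static int near_int(void *a,void *b,void *para)
{
    int d = *(int *)a - *(int *)b;
    if(d<0) d = -d;
    return (d <= *(int *)para);                 // 1: same cluster
}
// usage: int bound=3; int k=mListCluster(list,group,near_int,&bound);
#endif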
struct HandleListClassify
{
int *group;
char *valid;
MSheet *sheet;
int list_num;
};
void endListClassify(struct HandleListClassify *handle)
{
if(handle->group!=NULL) mFree(handle->group);
if(handle->valid!=NULL) mFree(handle->valid);
if(handle->sheet!=NULL) mSheetRelease(handle->sheet);
}
#define HASH_ListClassify 0x24c19acf
MSheet *mListClassify(MList *list,void *function,void *para)
{
int (*func)(void *,void *,void *) = function;
mException((INVALID_POINTER(list))||(func==NULL),EXIT,"invalid input");
MHandle *hdl = mHandle(list,ListClassify);
struct HandleListClassify *handle = (struct HandleListClassify *)(hdl->handle);
if((hdl->valid == 0)||(handle->list_num<list->num))
{
if(handle->list_num<list->num)
{
if(handle->group!=NULL) {mFree(handle->group);handle->group=NULL;}
if(handle->valid!=NULL) {mFree(handle->valid);handle->valid=NULL;}
}
if(handle->group==NULL) handle->group = (int *)mMalloc(list->num*sizeof(int ));
if(handle->valid==NULL) handle->valid = (char *)mMalloc(list->num*sizeof(char));
handle->list_num = list->num;
if(handle->sheet == NULL) handle->sheet = mSheetCreate();
hdl->valid = 1;
}
char *valid = handle->valid; int *group = handle->group;
memset(valid,0 ,list->num*sizeof(char));
memset(group,DFLT,list->num*sizeof(int));
int i,j,k;
int n=0;
for(i=0;i<list->num;i++)
{
for(j=0;j<i;j++)
{
if(group[i]==group[j]) continue;
if(func(list->data[i],list->data[j],para)==1)
{
if(group[i] == DFLT)
group[i] = group[j];
else
{
valid[group[j]] = 0;
int g = group[j];
for(k=0;k<i;k++)
if(group[k] == g) group[k] = group[i];
}
}
}
if(group[i] == DFLT)
{
group[i] = n;
valid[n] = 1;
n = n+1;
}
}
int *c = (int *)mMalloc(n *sizeof(int));
int num = 0;
for(i=0;i<n;i++)
{
if(valid[i] != 0)
{c[i] = num;num +=1;}
}
MSheet *sheet = handle->sheet;
mSheetClear(sheet);
mSheetRowAppend(sheet,num);
for(i=0;i<list->num;i++)
{
int g = c[group[i]];
int n = sheet->col[g];
mSheetColAppend(sheet,g,n+1);
sheet->data[g][n]=list->data[i];
}
mFree(c);
return sheet;
}
void _ListSort(void **list_data,int n,int (*func)(void *,void *,void *),void *para)
{
void *buff;
if(func(list_data[n-1],list_data[0],para)<0) {buff=list_data[n-1];list_data[n-1]=list_data[0];list_data[0]=buff;}
if(n==2) return;
if(func(list_data[ 1],list_data[0],para)<0) {buff=list_data[ 0];list_data[ 0]=list_data[1];}
else if(func(list_data[n-1],list_data[1],para)<0) {buff=list_data[n-1];list_data[n-1]=list_data[1];}
else buff=list_data[ 1];
if(n==3) {list_data[1]=buff;return;}
int i=1;int j=n-2;
while(1)
{
while(func(list_data[j],buff,para)>=0) {j=j-1;if(j==i) goto ListSort_next;}
list_data[i] = list_data[j]; i=i+1;if(i==j) goto ListSort_next;
while(func(list_data[i],buff,para)<=0) {i=i+1;if(i==j) goto ListSort_next;}
list_data[j] = list_data[i]; j=j-1;if(i==j) goto ListSort_next;
}
ListSort_next:
list_data[i]=buff;
if( i >1) _ListSort(list_data , i ,func,para);
if(n-i-1>1) _ListSort(list_data+i+1,n-i-1,func,para);
}
void mListSort(MList *list,void *function,void *para)
{
int (*func)(void *,void *,void *) = function;
mException((INVALID_POINTER(list))||(func==NULL),EXIT,"invalid input");
if(list->num<=1)return;
_ListSort(list->data,list->num,func,para);
}
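// A comparison callback sketch for mListSort above (illustrative only, hence
// #if 0). _ListSort expects strcmp-style ordering: negative, zero, or
// positive. String elements are a demo assumption.
#if 0
static int cmp_str(void *a,void *b,void *para)
{
    (void)para;                                 // no extra parameter needed here
    return strcmp((char *)a,(char *)b);
}
// usage: mListSort(list,cmp_str,NULL);  // ascending strcmp order
#endif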
struct HandleListMatch
{
int list_num;
int *idx;
};
void endListMatch(struct HandleListMatch *handle)
{
if(handle->idx!=NULL) mFree(handle->idx);
}
#define HASH_ListMatch 0x39871020
int *m_ListMatch(MList *src,MList *dst,float thresh,void *function,void *para)
{
float (*func)(void *,void *,void *) = function;
mException((INVALID_POINTER(src)||INVALID_POINTER(dst)),EXIT,"invalid input");
MHandle *hdl = mHandle(src,ListMatch);
struct HandleListMatch *handle = (struct HandleListMatch *)(hdl->handle);
if((hdl->valid==0)||(src->num>handle->list_num))
{
int list_num = MAX(src->num,handle->list_num);
if(list_num>handle->list_num)
{
if(handle->idx !=NULL) mFree(handle->idx);
handle->idx = mMalloc(list_num*sizeof(int));
handle->list_num = list_num;
}
hdl->valid = 1;
}
if(dst->num==0) {memset(handle->idx,DFLT,src->num*sizeof(int));return handle->idx;}
for(int i=0;i<src->num;i++)
{
float d_min = func(src->data[i],dst->data[0],para);int idx = 0;
for(int j=1;j<dst->num;j++)
{
float d = func(src->data[i],dst->data[j],para);
if(d<d_min){d_min=d;idx=j;}
}
handle->idx[i]=(d_min<thresh)?idx:DFLT;
}
return (handle->idx);
}
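// A distance callback sketch for the matcher above (illustrative only, hence
// #if 0): the callback returns a float cost, and any match whose best cost is
// not below thresh becomes DFLT in the returned index array. 1-D float
// elements are a demo assumption.
#if 0
static float dist1d(void *a,void *b,void *para)
{
    (void)para;
    float d = *(float *)a - *(float *)b;
    return (d<0)? -d : d;                       // absolute difference as cost
}
// usage: int *idx = m_ListMatch(src,dst,0.5f,dist1d,NULL);
#endif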
struct HandleStack
{
volatile int order;
};
void endStack(void *info) {}
#define HASH_Stack 0x8c4d4c73
void *mStackWrite(MList *stack,void *data,int size)
{
mException(INVALID_POINTER(stack),EXIT,"invalid stack");
MHandle *hdl=mHandle(stack,Stack);
struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
if(hdl->valid == 0) handle->order = -1;
hdl->valid = 1;
if(handle->order==stack->num-1) return NULL;
mAtomicAdd(&(handle->order),1);
return mListWrite(stack,handle->order,data,size);
}
void *mStackRead(MList *stack,void *data,int size)
{
mException(INVALID_POINTER(stack),EXIT,"invalid stack");
MHandle *hdl=mHandle(stack,Stack);
struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
if(hdl->valid == 0) return NULL;
if(handle->order <0) return NULL;
int order = mAtomicSub(&(handle->order),1);
void *p=stack->data[order];
if(data!=NULL)
{
if(size<=0) strcpy((char *)data,(char *)p);
else memcpy(data,p,size);
}
return p;
}
int mStackSize(MList *stack)
{
mException(INVALID_POINTER(stack),EXIT,"invalid stack");
MHandle *hdl=mHandle(stack,Stack);
struct HandleStack *handle = (struct HandleStack *)(hdl->handle);
if(hdl->valid == 0) handle->order = -1;
hdl->valid = 1;
return (handle->order+1);
}
// struct HandleQueue
// {
// volatile int read_order;
// volatile int write_order;
// volatile int flag;
// };
// void endQueue(void *info) {}
// #define HASH_Queue 0xd98b43dc
// int mQueueSize(MList *queue)
// {
// mException(INVALID_POINTER(queue),EXIT,"invalid queue");
// MHandle *hdl=mHandle(queue,Queue);
// struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
// if(handle->flag>0) return queue->num;
// if(handle->flag<0) return 0;
// int n = handle->write_order - handle->read_order;
// return ((n>0)?n:(queue->num+n));
// }
// void *mQueueWrite(MList *queue,void *data,int size)
// {
// mException(INVALID_POINTER(queue),EXIT,"invalid queue");
// mException(queue->num<=0,EXIT,"invalid queue");
// MHandle *hdl=mHandle(queue,Queue);
// struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
// if(hdl->valid == 0) {handle->read_order=0;handle->write_order=0;}
// hdl->valid = 1;
// if(handle->flag>0) return NULL;
// int order=mAtomicAdd(&(handle->write_order),1);
// if(order>=queue->num) order=order-queue->num;
// void *p = mListWrite(queue,order,data,size);
// mAtomicCompare(&(handle->write_order),queue->num,0);
// handle->flag =(handle->write_order == handle->read_order)?1:0;
// return p;
// }
// void *mQueueRead(MList *queue,void *data,int size)
// {
// mException(INVALID_POINTER(queue),EXIT,"invalid queue");
// mException(queue->num<=0,EXIT,"invalid queue");
// MHandle *hdl=mHandle(queue,Queue);
// struct HandleQueue *handle = (struct HandleQueue *)(hdl->handle);
// if(hdl->valid == 0) return NULL;
// if(handle->flag<0) return NULL;
// int order = mAtomicAdd(&(handle->read_order),1);
// void *p = queue->data[order];
// mAtomicCompare(&(handle->read_order),queue->num,0);
// handle->flag =(handle->write_order == handle->read_order)?-1:0;
// if(data!=NULL)
// {
// if(size<=0) strcpy((char *)data,(char *)p);
// else memcpy(data,p,size);
// }
// return p;
// }
// struct HashElement
// {
// int hash;
// void *data;
// };
// struct HandleHashList
// {
// int num;
// };
// void mHashList(MList *list,void *data,int size)
// {
// if(list->size <
/*
struct HandleBuffer
{
int proc_num;
int *order;
unsigned char *state;
};
void endBuffer(void *info)
{
struct HandleBuffer *handle = info;
if(handle->state != NULL) mFree(handle->state);
}
#define HASH_Buffer 0xcb4df739
int BufferRead(MList *buffer,int ID,struct HandleBuffer *handle)
{
int proc_num = handle->proc_num;
int order = handle->order[ID];
if(((ID >0)&&(handle->order[ID-1]==order))||((ID==0)&&(handle->order[proc_num-1]==order)))
return DFLT;
int state = handle->state[order];
    if(((state&1) == 1)||(order<0))
{
order = order + 1;
if(order == buffer->num)
{
if(handle->order[handle->proc_num-1]<0) return DFLT;
order = 0;
}
handle->state[handle->order[ID]] = 0;
handle->order[ID] = order;
return BufferRead(buffer,ID,handle);
}
return order;
}
void *mBufferSet(MList *buffer,int volume,int proc_num)
{
mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
if(volume>0)
{
        if(buffer->num>volume) buffer->num = volume;
        else mListAppend(buffer,volume);
}
mException(buffer->num<=1,EXIT,"invalid buffer");
mException((proc_num<=0),EXIT,"invalid proc_num");
MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
struct HandleBuffer *handle = hdl->handle;
if(hdl->valid == 0)
{
handle->order = mMalloc(proc_num*sizeof(int));
memset(handle->order,-1,proc_num*sizeof(int));
handle->proc_num = proc_num;
handle->state = mMalloc(buffer->num*sizeof(unsigned char));
memset(handle->state,0,buffer->num*sizeof(unsigned char));
}
hdl->valid = 1;
}
void *mBufferWrite(MList *buffer,int ID,void *data,int size)
{
mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
struct HandleBuffer *handle = hdl->handle;
mException((hdl->valid == 0),EXIT,"invalid buffer");
int proc_num = handle->proc_num;
    mException((ID>=proc_num)||(ID<0),EXIT,"invalid ID");
int order = handle->order[ID];
    if(((handle->state[order]&2)!=0)||(order<0))
{
order = order+1;
if(order==buffer->num) order=0;
        if((ID==0)&&(handle->state[order]!=0)) return NULL;
        if((ID >0)&&(handle->state[order]!=4)) return NULL;
        handle->state[handle->order[ID]] = 4;
handle->order[ID] = order;
}
void *p = mListWrite(buffer,order,data,size);
handle->state[order] = (handle->state[order])|2;
return p;
}
void mBufferRead(MList *buffer,int ID,void *data,int size)
{
mException(INVALID_POINTER(buffer),EXIT,"invalid buffer");
MHandle *hdl;ObjectHandle(buffer,Buffer,hdl);
struct HandleBuffer *handle = hdl->handle;
mException((hdl->valid == 0),EXIT,"invalid buffer");
int proc_num = handle->proc_num;
    mException((ID>=proc_num)||(ID<0),EXIT,"invalid ID");
int order = handle->order[ID];
    if(((handle->state[order]&1)!=0)||(order<0))
{
order = order+1;
if(order==buffer->num)
{
if(handle->order[proc_num-1]< 0) return NULL;
order=0;
}
        if(ID>0)
        {
            if(handle->order[ID-1]==order) return NULL;
        }
        else if(proc_num>1)
        {
            if(handle->order[proc_num-1]==order) return NULL;
        }
        handle->state[handle->order[ID]] = 0;
        handle->order[ID] = order;
}
void *p = mListRead(buffer,order,data,size);
*/
|
cmat.c | /*
 * Cheap matrix library for C
*
* Copyright (C) 2019 Hiroshi Kuwagata <kgt9221@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include "cmat.h"
#if defined(ENABLE_NEON) && (!defined(__ARM_NEON) && !defined(__ARM_NEON__))
#error "ARM NEON instruction set is not available."
#endif
#ifdef ENABLE_NEON
#include <arm_neon.h>
#endif /* defined(ENABLE_NEON) */
#define DEFAULT_ERROR __LINE__
#define DEFAULT_CUTOFF 1e-4
#define GROW(n) (((n) * 13) / 10)
#define SHRINK(n) (((n) * 10) / 13)
#define SWAP(a,b,t) do {t c; c = (a); (a) = (b); (b) = c;} while(0)
#ifdef ENABLE_NEON
#define ALIGN_ROWS(n) ((n) + (4 - ((n) % 4)))
#define ALIGN_COLS(n) ((n) + (4 - ((n) % 4)))
#else /* defined(ENABLE_NEON) */
#define ALIGN_ROWS(n) (n)
#define ALIGN_COLS(n) (n)
#endif /* defined(ENABLE_NEON) */
#ifdef ENABLE_NEON
static void
memcpy128(void* dst, void* src, size_t size)
{
int i;
for (i = 0; i < size; i += 16) {
vst1q_u8(dst + i, vld1q_u8(src + i));
}
}
static void
bzero128(void* dst, size_t size)
{
int i;
uint8x16_t zero;
zero = vmovq_n_u8(0);
for (i = 0; i < size; i += 16) {
vst1q_u8(dst + i, zero);
}
}
#endif /* defined(ENABLE_NEON) */
static int
alloc_object(int rows, int cols, cmat_t* org, cmat_t** dst)
{
int ret;
float* tbl;
float** row;
cmat_t* obj;
int stride;
int capa;
int i;
#ifdef ENABLE_NEON
int j;
#endif /* defined(ENABLE_NEON) */
/*
* initialize
*/
ret = 0;
tbl = NULL;
row = NULL;
obj = NULL;
stride = ALIGN_COLS(cols);
capa = ALIGN_ROWS(rows);
do {
/*
* alloc memory
*/
obj = (cmat_t*)malloc(sizeof(cmat_t));
if (obj == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
if (capa > 0) {
tbl = (float*)malloc(sizeof(float) * capa * stride);
if (tbl == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
row = (float**)malloc(sizeof(float*) * capa);
if (row == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
for (i = 0; i < capa; i++) {
row[i] = tbl + (i * stride);
}
}
obj->tbl = tbl;
obj->row = row;
obj->rows = rows;
obj->cols = cols;
obj->stride = stride;
obj->capa = capa;
if (org) {
obj->coff = org->coff;
} else {
obj->coff = DEFAULT_CUTOFF;
}
*dst = obj;
} while (0);
/*
* post process
*/
if (ret) {
if (obj) free(obj);
if (tbl) free(tbl);
if (row) free(row);
}
return ret;
}
static void
free_object(cmat_t* ptr)
{
if (ptr->tbl) free(ptr->tbl);
if (ptr->row) free(ptr->row);
free(ptr);
}
static void
replace_object(cmat_t* ptr, cmat_t** src)
{
free(ptr->tbl);
free(ptr->row);
memcpy(ptr, *src, sizeof(cmat_t));
free(*src);
*src = NULL;
}
static int
alloc_table(float** src, int rows, int cols, float** dt, float*** dr)
{
int ret;
float* tbl;
float** row;
int stride;
int i;
#ifdef ENABLE_NEON
int j;
#endif /* defined(ENABLE_NEON) */
ret = 0;
tbl = NULL;
row = NULL;
stride = ALIGN_COLS(cols);
do {
tbl = (float*)malloc(sizeof(float) * rows * stride);
if (tbl == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
row = (float**)malloc(sizeof(float*) * rows);
if (row == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
} while (0);
if (!ret) {
for (i = 0; i < rows; i++) {
      row[i] = tbl + (i * stride);   /* step by stride (not cols): tbl holds rows*stride floats */
if (src) memcpy(row[i], src[i], sizeof(float) * cols);
#ifdef ENABLE_NEON
for (j = cols; j < stride; j++) row[i][j] = 0.0f;
#endif /* defined(ENABLE_NEON) */
}
*dt = tbl;
*dr = row;
}
if(ret) {
if (tbl) free(tbl);
if (row) free(row);
}
return ret;
}
static int
format(float val, char* dst, float thr)
{
int ret;
int i;
if (fabsf(val) > thr) {
sprintf(dst, "% f", val);
for (i = strlen(dst) - 1; i > 0; i--) {
switch (dst[i]) {
case '0':
break;
case '.':
dst[i + 0] = '\0';
ret = i;
goto loop_out;
default:
dst[i + 1] = '\0';
ret = i + 1;
goto loop_out;
}
}
} else {
strcpy(dst, " 0");
ret = 1;
}
loop_out:
return ret;
}
static inline int
fcmp(float f1, float f2, float coff)
{
  /*
   * Note that the comparison below is slightly involved because it is based
   * on significant digits rather than absolute error.
   *
   * For example, when comparing two very large values, it is reasonable to
   * treat them as equal when the error fits within the significant digits,
   * even if the absolute difference is large. Simply comparing the
   * difference against a fixed reference value cannot handle that case.
   *
   * Note: the current implementation is still not perfect; fix it
   * eventually.
   */
float v1;
int e1;
float v2;
int e2;
v1 = frexpf(f1, &e1);
v2 = frexpf(f2, &e2);
if (e1 == e2) {
f1 = v1;
f2 = v2;
}
#if 0
if (fabsf(f1 - f2) > coff) {
printf("%.20f %.20f %.20g\n", f1, f2, fabsf(f1 - f2));
}
#endif
return fabsf(f1 - f2) > coff;
}
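/*
 * What the frexpf() trick above buys us, on demo numbers (illustrative only,
 * hence #if 0): frexpf() splits a float into a mantissa in [0.5, 1) and a
 * power-of-two exponent, so when two values share an exponent the mantissa
 * difference measures relative rather than absolute error.
 */
#if 0
static void fcmp_demo(void)
{
  int e1, e2;
  float m1 = frexpf(1048576.0f, &e1);  /* m1 = 0.5,        e1 = 21 */
  float m2 = frexpf(1048576.5f, &e2);  /* m2 ~ 0.50000024, e2 = 21 */
  /* e1 == e2, so fcmp() compares the mantissas: |m1 - m2| ~ 2.4e-7 is far
     below DEFAULT_CUTOFF (1e-4), hence the two large values compare equal
     even though their absolute difference is 0.5 */
  printf("%d %d %g\n", e1, e2, fabsf(m1 - m2));
}
#endif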
/*
* http://hooktail.org/computer/index.php?LU%CA%AC%B2%F2
*/
static int
lu_decomp(float** row, int sz, float thr, int* piv)
{
int ret;
int i;
int j;
int k;
float max;
float tmp;
float* pi;
float* pj;
ret = 0;
if (piv) {
for (i = 0; i < sz; i++) piv[i] = i;
}
for (i = 0; i < sz; i++) {
pi = row[i];
max = fabsf(pi[i]);
k = i;
    /* find the row with the largest (absolute) value at or below the current row */
for (j = i + 1; j < sz; j++) {
tmp = fabsf(row[j][i]);
      /*
       * Accumulated floating-point rounding error can make a plain
       * comparison unreliable when the difference is tiny. To avoid this,
       * the comparison thresholds the difference instead. The if statement
       * below effectively evaluates tmp > max.
       */
if (tmp - max > thr) {
max = tmp;
k = j;
}
}
    /* swap the current row with the row that held the maximum */
if (k != i) {
SWAP(row[i], row[k], float*);
if (piv) SWAP(piv[i], piv[k], int);
pi = row[i];
ret++;
}
    /* if the diagonal element is 0 at this point, the decomposition of the
       current row can be considered finished, so move on to the next row */
if (pi[i] == 0.0) continue;
    /* forward elimination */
#pragma omp parallel for private(k,pj,tmp)
for (j = i + 1; j < sz; j++) {
pj = row[j];
tmp = (pj[i] /= pi[i]);
for (k = i + 1; k < sz; k++) {
pj[k] -= tmp * pi[k];
}
}
}
return ret;
}
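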
static float
det(float a, float b, float c, float d)
{
return (a * d) - (b * c);
}
static float
calc_det_dim2(float* r1, float* r2)
{
return det(r1[0], r1[1], r2[0], r2[1]);
}
static float
calc_det_dim3(float* r1, float* r2, float* r3)
{
float ret;
ret = (r1[0] * det(r2[1], r2[2], r3[1], r3[2])) -
(r2[0] * det(r1[1], r1[2], r3[1], r3[2])) +
(r3[0] * det(r1[1], r1[2], r2[1], r2[2]));
return ret;
}
static int
calc_det(float** row, int sz, float thr, float* dst)
{
int ret;
float* wt; // as "Work Table"
float** wr; // as "Work Rows"
float det;
int i;
int j;
int n;
do {
ret = 0;
wt = NULL;
wr = NULL;
/* alloc work buffer */
ret = alloc_table(row, sz, sz, &wt, &wr);
if (ret) break;
/* do LU decomposition */
n = lu_decomp(wr, sz, thr, NULL);
/* calc diagonal multiplier */
det = (n & 1)? -1.0: 1.0;
for (i = 0; i < sz; i++) {
det *= wr[i][i];
}
} while (0);
if (wr) free(wr);
if (wt) free(wt);
if (!ret) *dst = det;
return ret;
}
static void
calc_inverse(float** src, int n, float** dst)
{
int i;
int j;
int k;
float max;
float tmp;
float* si;
float* sj;
float* di;
float* dj;
/* create identity matrix */
for (i = 0; i < n; i++) {
#ifdef ENABLE_NEON
bzero128(dst[i], sizeof(float) * n);
#else /* defined(ENABLE_NEON) */
memset(dst[i], 0, sizeof(float) * n);
#endif /* defined(ENABLE_NEON) */
dst[i][i] = 1.0;
}
/* do row reduction method */
for (i = 0; i < n; i++) {
si = src[i];
di = dst[i];
max = fabsf(si[i]);
k = i;
    /* pivoting */
    // find the row with the largest (absolute) value at or below the current row
for (j = i + 1; j < n; j++) {
tmp = fabsf(src[j][i]);
if (tmp > max) {
max = tmp;
k = j;
}
}
    // swap the current row with the row that held the maximum
if (i != k) {
SWAP(src[i], src[k], float*);
SWAP(dst[i], dst[k], float*);
si = src[i];
di = dst[i];
}
if (si[i] == 0.0) continue;
    /* Gauss-Jordan elimination from here on */
tmp = 1.0 / si[i];
for (j = 0; j < n; j++) {
si[j] *= tmp;
di[j] *= tmp;
}
#pragma omp parallel for private(k,sj,dj,tmp)
for (j = 0; j < n; j++) {
if (i == j) continue;
sj = src[j];
dj = dst[j];
tmp = sj[i];
for (k = 0; k < n; k++) {
sj[k] -= si[k] * tmp;
dj[k] -= di[k] * tmp;
}
}
}
}
static void
sort(int* a, size_t n)
{
int h;
int f;
int i;
/*
* sort by ascending order
*/
  h = n;
  f = 0;   /* initialize: avoids reading f before assignment when n <= 1 */
do {
if (h > 1) {
h = SHRINK(h);
} else if (!f) {
break;
}
f = 0;
if (h == 9 || h == 10) h = 11;
for (i = 0; i < ((int)n - h); i++) {
if (a[i] > a[i + h]) {
SWAP(a[i], a[i + h], int);
f = !0;
}
}
} while (1);
}
/**
 * create a matrix object
 *
 * @param src  initial values in row-major order (rows*cols entries), or
 *             NULL to zero-fill
 * @param rows number of rows
 * @param cols number of columns
 * @param dst  pointer to where the created object is stored
 *
 * @return error code (0 on success)
 */
int
cmat_new(float* src, int rows, int cols, cmat_t** dst)
{
int ret;
cmat_t* obj;
int i;
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* argument check
*/
do {
if (rows < 0) {
ret = CMAT_ERR_BSIZE;
break;
}
if (cols <= 0) {
ret = CMAT_ERR_BSIZE;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while(0);
/*
* alloc memory
*/
if (!ret) {
ret = alloc_object(rows, cols, NULL, &obj);
}
/*
* set initial values
*/
if (!ret) {
if (src) {
for (i = 0; i < rows; i++) {
memcpy(obj->row[i], src, sizeof(float) * cols);
src += cols;
}
} else {
#ifdef ENABLE_NEON
bzero128(obj->tbl, sizeof(float) * rows * obj->stride);
#else /* defined(ENABLE_NEON) */
memset(obj->tbl, 0, sizeof(float) * rows * obj->stride);
#endif /* defined(ENABLE_NEON) */
}
}
/*
* put return parameter
*/
if (!ret) {
*dst = obj;
}
/*
* post process
*/
if (ret) {
if (obj) free_object(obj);
}
return ret;
}
/**
 * clone a matrix object
 *
 * @param ptr matrix object to clone
 * @param dst pointer to where the created object is stored
 *
 * @return error code (0 on success)
 */
int
cmat_clone(cmat_t* ptr, cmat_t** dst)
{
int ret;
cmat_t* obj;
int i;
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while(0);
/*
* alloc memory
*/
if (!ret) {
ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
}
/*
* copy values
*/
if (!ret) {
#ifdef ENABLE_NEON
memcpy128(obj->tbl, ptr->tbl, sizeof(float) * ptr->rows * ptr->stride);
#else /* defined(ENABLE_NEON) */
memcpy(obj->tbl, ptr->tbl, sizeof(float) * ptr->rows * ptr->stride);
#endif /* defined(ENABLE_NEON) */
for (i = 0; i < ptr->rows; i ++) {
obj->row[i] = obj->tbl + (ptr->row[i] - ptr->tbl);
}
}
/*
* put return parameter
*/
if (!ret) {
*dst = obj;
}
return ret;
}
/**
 * destroy a matrix object
 *
 * @param ptr pointer to the object to destroy
 *
 * @return error code (0 on success)
 */
int
cmat_destroy(cmat_t* ptr)
{
int ret;
/*
* initialize
*/
ret = 0;
/*
* argument check
*/
if (ptr == NULL) ret = CMAT_ERR_BADDR;
/*
* release memory
*/
if (!ret) {
free_object(ptr);
}
return ret;
}
/**
 * print the contents of a matrix object
 *
 * @param ptr   target matrix object
 * @param label label printed before the matrix (may be NULL)
 *
 * @return error code (0 on success)
 */
int
cmat_print(cmat_t* ptr, char* label)
{
int ret;
int r;
int c;
float* rp; // as "Row Pointer"
char fmt[32];
char str[32];
int len;
int max;
int i;
/*
* initialize
*/
ret = 0;
/*
* argument check
*/
if (ptr == NULL) ret = CMAT_ERR_BADDR;
/*
* make format string
*/
if (!ret) {
max = 0;
for (r = 0; r < ptr->rows; r++) {
rp = ptr->row[r];
for (c = 0; c < ptr->cols; c++) {
len = format(rp[c], str, ptr->coff);
if (len > max) max = len;
}
}
sprintf(fmt, "%%%ds", max);
}
/*
* show content
*/
if (!ret) {
if (label != NULL) printf("%s:\n", label);
for (r = 0; r < ptr->rows; r++) {
rp = ptr->row[r];
if (label != NULL) printf(" ");
printf("[");
for (c = 0; c < ptr->cols; c++) {
format(rp[c], str, ptr->coff);
printf(fmt, str);
if (c < (ptr->cols - 1)) printf(" ");
}
printf(" ]\n");
}
}
return ret;
}
/**
 * append a row
 *
 * @param ptr matrix object to append to
 * @param src row data to append
 *
 * @return error code (0 on success)
 */
int
cmat_append(cmat_t* ptr, float* src)
{
int ret;
float* tbl;
float** row;
int capa;
int i;
#ifdef ENABLE_NEON
int j;
#endif /* defined(ENABLE_NEON) */
/*
* initialize
*/
ret = 0;
tbl = NULL;
row = NULL;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (src == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* grow table
*/
if (!ret) do {
if (ptr->capa == ptr->rows) {
capa = (ptr->capa < 10)? 10: GROW(ptr->capa);
capa = ALIGN_ROWS(capa);
tbl = (float*)realloc(ptr->tbl, sizeof(float) * capa * ptr->stride);
if (tbl == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
      /* Rows may have been permuted by LU decomposition etc., so the existing
         row layout must be reproduced (i.e. the existing row table must be
         consulted); realloc() is therefore not used for the row table. */
row = (float**)malloc(sizeof(float*) * capa);
if (row == NULL) {
ret = CMAT_ERR_NOMEM;
break;
}
if (ptr->row) {
        /* reproduce the existing row layout (row positions may have been
           swapped by pivoting in other operations) */
for (i = 0; i < ptr->capa; i++) {
row[i] = tbl + (ptr->row[i] - ptr->tbl);
}
        /* the old row table is no longer needed, so release it */
free(ptr->row);
} else {
ptr->capa = 0;
}
      /* set up the row layout for the newly added part */
for (i = ptr->capa; i < capa; i++) {
row[i] = tbl + (i * ptr->stride);
}
      /* update the context */
ptr->tbl = tbl;
ptr->row = row;
ptr->capa = capa;
}
} while (0);
/*
* update context
*/
if (!ret) {
memcpy(ptr->row[ptr->rows], src, sizeof(float) * ptr->cols);
#ifdef ENABLE_NEON
for (j = ptr->cols; j < ptr->stride; j++) ptr->row[ptr->rows][j] = 0.0f;
#endif /* defined(ENABLE_NEON) */
ptr->rows++;
}
/*
* post process
*/
if (ret) {
if (tbl) free(tbl);
if (row) free(row);
}
return ret;
}
/**
 * matrix addition
 *   ptr + op → dst (dst != NULL)
 *   ptr + op → ptr (dst == NULL)
 *
 * @param ptr target matrix object
 * @param op  matrix to add
 * @param dst where the result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_add(cmat_t* ptr, cmat_t* op, cmat_t** dst)
{
int ret;
cmat_t* obj;
int r;
int c;
float* s;
float* o;
float* d;
#ifdef ENABLE_NEON
float32x4_t vs;
float32x4_t vo;
float32x4_t vd;
#endif /* defined(ENABLE_NEON) */
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* check argument
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (op == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* check shape
*/
if (!ret) {
if (ptr->rows != op->rows || ptr->cols != op->cols) ret = CMAT_ERR_SHAPE;
}
/*
* select target
*/
if (!ret) {
if (dst) {
ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
} else {
obj = ptr;
}
}
/*
* do add operation
*/
if (!ret) {
#ifdef ENABLE_NEON
#pragma omp parallel for private(s,o,d,c,vs,vo,vd)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
o = op->row[r];
d = obj->row[r];
for (c = 0; c < ptr->cols; c += 4) {
vs = vld1q_f32(s);
vo = vld1q_f32(o);
vd = vaddq_f32(vs, vo);
vst1q_f32(d, vd);
s += 4;
o += 4;
d += 4;
}
}
#else /* defined(ENABLE_NEON) */
#pragma omp parallel for private(s,o,d,c)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
o = op->row[r];
d = obj->row[r];
for (c = 0; c < ptr->stride; c++) {
d[c] = s[c] + o[c];
}
}
#endif /* defined(ENABLE_NEON) */
}
/*
* put return parameter
*/
if (!ret) {
if (dst) *dst = obj;
}
/*
* post process
*/
if (ret) {
if (dst && obj) free_object(obj);
}
return ret;
}
/**
 * matrix subtraction
 *   ptr - op → dst (dst != NULL)
 *   ptr - op → ptr (dst == NULL)
 *
 * @param ptr target matrix object
 * @param op  matrix to subtract
 * @param dst where the result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_sub(cmat_t* ptr, cmat_t* op, cmat_t** dst)
{
int ret;
cmat_t* obj;
int r;
int c;
float* s;
float* o;
float* d;
#ifdef ENABLE_NEON
float32x4_t vs;
float32x4_t vo;
float32x4_t vd;
#endif /* defined(ENABLE_NEON) */
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* check argument
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (op == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* check shape
*/
if (!ret) {
if (ptr->rows != op->rows || ptr->cols != op->cols) ret = CMAT_ERR_SHAPE;
}
/*
* select target
*/
if (!ret) {
if (dst) {
ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
} else {
obj = ptr;
}
}
/*
   * do subtract operation
*/
if (!ret) {
#ifdef ENABLE_NEON
#pragma omp parallel for private(s,o,d,c,vs,vo,vd)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
o = op->row[r];
d = obj->row[r];
for (c = 0; c < ptr->cols; c += 4) {
vs = vld1q_f32(s);
vo = vld1q_f32(o);
vd = vsubq_f32(vs, vo);
vst1q_f32(d, vd);
s += 4;
o += 4;
d += 4;
}
}
#else /* defined(ENABLE_NEON) */
#pragma omp parallel for private(s,o,d,c)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
o = op->row[r];
d = obj->row[r];
for (c = 0; c < ptr->cols; c++) {
d[c] = s[c] - o[c];
}
}
#endif /* defined(ENABLE_NEON) */
}
/*
* put return parameter
*/
if (!ret) {
if (dst) *dst = obj;
}
/*
* post process
*/
if (ret) {
if (dst && obj) free_object(obj);
}
return ret;
}
/**
 * scalar multiplication
 *   ptr * op → dst (dst != NULL)
 *   ptr * op → ptr (dst == NULL)
 *
 * @param ptr target matrix object
 * @param op  scalar value
 * @param dst where the result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_mul(cmat_t* ptr, float op, cmat_t** dst)
{
int ret;
cmat_t* obj;
int r;
int c;
float* s;
float* d;
#ifdef ENABLE_NEON
float32x4_t vs;
float32x4_t vo;
float32x4_t vd;
#endif /* defined(ENABLE_NEON) */
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* check argument
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (isnan(op)) {
ret = CMAT_ERR_INVAL;
break;
}
} while (0);
/*
* select target
*/
if (!ret) {
if (dst) {
ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
} else {
obj = ptr;
}
}
/*
   * do scalar-multiply operation
*/
if (!ret) {
#ifdef ENABLE_NEON
vo = vmovq_n_f32(op);
#pragma omp parallel for private(s,d,c,vs,vd)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
d = obj->row[r];
for (c = 0; c < ptr->cols; c += 4) {
vs = vld1q_f32(s);
vd = vmulq_f32(vs, vo);
vst1q_f32(d, vd);
s += 4;
d += 4;
}
}
#else /* defined(ENABLE_NEON) */
#pragma omp parallel for private(s,d,c)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
d = obj->row[r];
for (c = 0; c < ptr->cols; c++) {
d[c] = s[c] * op;
}
}
#endif /* defined(ENABLE_NEON) */
}
/*
* put return parameter
*/
if (!ret) {
if (dst) *dst = obj;
}
/*
* post process
*/
if (ret) {
if (dst && obj) free_object(obj);
}
return ret;
}
/**
 * matrix product
 *   ptr * op → dst (dst != NULL)
 *   ptr * op → ptr (dst == NULL)
 *
 * @param ptr target matrix object (left operand)
 * @param op  matrix to multiply by (right operand)
 * @param dst where the result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_product(cmat_t* ptr, cmat_t* op, cmat_t** dst)
{
int ret;
cmat_t* obj;
int r;
int c;
int i;
#ifdef ENABLE_NEON
float** s;
float** o;
float** d;
float32x4_t vs;
float32x4_t vo;
float32x4_t vd;
#else /* defined(ENABLE_NEON) */
float* s;
float* d;
#endif /* defined(ENABLE_NEON) */
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (op == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* check op value
*/
if (!ret) {
if (ptr->cols != op->rows) ret = CMAT_ERR_SHAPE;
}
/*
* alloc result object
*/
if (!ret) {
ret = alloc_object(ptr->rows, op->cols, ptr, &obj);
}
/*
   * do multiply operation
*/
if (!ret) {
#ifdef ENABLE_NEON
s = ptr->row;
o = op->row;
d = obj->row;
#pragma omp parallel for private(c,i,vs,vo,vd)
for (r = 0; r < ptr->rows; r += 4) {
for (c = 0; c < op->cols; c++) {
// set 0 to destination
vd = vmovq_n_f32(0.0f);
for (i = 0; i < ptr->cols; i++) {
// load sources
vs = vsetq_lane_f32(s[r+0][i], vs, 0);
vs = vsetq_lane_f32(s[r+1][i], vs, 1);
vs = vsetq_lane_f32(s[r+2][i], vs, 2);
vs = vsetq_lane_f32(s[r+3][i], vs, 3);
// load operands
vo = vmovq_n_f32(o[i][c]);
// do product-sum
vd = vmlaq_f32(vd, vs, vo);
}
// store destination
d[r+0][c] = vgetq_lane_f32(vd, 0);
d[r+1][c] = vgetq_lane_f32(vd, 1);
d[r+2][c] = vgetq_lane_f32(vd, 2);
d[r+3][c] = vgetq_lane_f32(vd, 3);
}
}
#else /* defined(ENABLE_NEON) */
#pragma omp parallel for private(c,i)
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
d = obj->row[r];
for (c = 0; c < op->cols; c++) {
d[c] = 0.0;
for (i = 0; i < ptr->cols; i++) {
d[c] += s[i] * op->row[i][c];
}
}
}
#endif /* defined(ENABLE_NEON) */
}
/*
* put return parameter
*/
if (!ret) {
if (dst) {
*dst = obj;
} else {
replace_object(ptr, &obj);
}
}
/*
* post process
*/
if (ret) {
if (obj) free_object(obj);
}
return ret;
}
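/*
 * A minimal end-to-end usage sketch of the API above (illustrative only,
 * hence #if 0): build two matrices, multiply, print, clean up. Only
 * functions defined in this file are used; cmat.h declaring them for
 * callers is the one assumption.
 */
#if 0
static void product_demo(void)
{
  float a[] = {1, 2,
               3, 4};                  /* 2x2, row-major */
  float b[] = {5, 6,
               7, 8};
  cmat_t *ma, *mb, *mc;
  if (cmat_new(a, 2, 2, &ma)) return;
  if (cmat_new(b, 2, 2, &mb)) { cmat_destroy(ma); return; }
  if (!cmat_product(ma, mb, &mc)) {
    cmat_print(mc, "a*b");             /* [19 22; 43 50] */
    cmat_destroy(mc);
  }
  cmat_destroy(ma);
  cmat_destroy(mb);
}
#endif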
/**
 * matrix transpose
 *   transpose(ptr) → dst (dst != NULL)
 *   transpose(ptr) → ptr (dst == NULL)
 *
 * @param ptr matrix object to transpose
 * @param dst where the transposed result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_transpose(cmat_t* ptr, cmat_t** dst)
{
int ret;
cmat_t* obj;
int r;
int c;
float* s;
/*
* initialize
*/
ret = 0;
obj = NULL;
/*
* argument check
*/
if (ptr == NULL) ret = CMAT_ERR_BADDR;
/*
* alloc result object
*/
if (!ret) {
ret = alloc_object(ptr->cols, ptr->rows, ptr, &obj);
}
/*
* do transpose operation
*/
if (!ret) {
for (r = 0; r < ptr->rows; r++) {
s = ptr->row[r];
for (c = 0; c < ptr->cols; c++) {
obj->row[c][r] = s[c];
}
}
}
/*
* put return parameter
*/
if (!ret) {
if (dst) {
*dst = obj;
} else {
replace_object(ptr, &obj);
}
}
/*
* post process
*/
if (ret) {
if (obj) cmat_destroy(obj);
}
return ret;
}
/**
 * compute the inverse matrix
 *   inverse(ptr) → dst (dst != NULL)
 *   inverse(ptr) → ptr (dst == NULL)
 *
 * @param ptr target matrix object
 * @param dst where the inverse is stored
 *
 * @return error code (0 on success)
 *
 * @refer http://thira.plavox.info/blog/2008/06/_c.html
 *        http://www.yamamo10.jp/yamamoto/lecture/2006/5E/
 *        Linear_eauations/gaussj_html/node2.html
 */
int
cmat_inverse(cmat_t* ptr, cmat_t** dst)
{
int ret;
cmat_t* obj;
float det;
int i;
float* st; // as "Source Table"
float** sr; // as "Source Row"
float* dt; // as "Destination Table"
float** dr; // as "destination Row"
/*
* initialize
*/
ret = 0;
obj = NULL;
st = NULL;
sr = NULL;
dt = NULL;
dr = NULL;
/*
* argument check
*/
if (ptr == NULL) ret = CMAT_ERR_BADDR;
/*
* check if it's a regular matrix
*/
if (!ret) {
ret = cmat_det(ptr, &det);
}
if (!ret) {
if (fabsf(det) < ptr->coff) ret = CMAT_ERR_NREGL;
}
/*
* alloc work(or output) memory
*/
if (!ret) {
ret = alloc_table(NULL, ptr->capa, ptr->cols, &st, &sr);
}
/*
* alloc result object
*/
if (!ret) {
if (dst) {
ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
if (!ret) {
for (i = 0; i < ptr->rows; i++) {
#ifdef ENABLE_NEON
memcpy128(sr[i], ptr->row[i], sizeof(float) * ptr->cols);
#else /* defined(ENABLE_NEON) */
memcpy(sr[i], ptr->row[i], sizeof(float) * ptr->cols);
#endif /* defined(ENABLE_NEON) */
}
dt = obj->tbl;
dr = obj->row;
}
} else {
dt = st;
dr = sr;
st = ptr->tbl;
sr = ptr->row;
}
}
/*
* calculate inverse matrix
*/
if (!ret) {
calc_inverse(sr, ptr->rows, dr);
}
/*
* put return parameter
*/
if (!ret) {
if (dst) {
*dst = obj;
} else {
free(ptr->tbl);
free(ptr->row);
ptr->tbl = dt;
ptr->row = dr;
}
}
/*
* post process
*/
if (ret) {
if (dst) {
if (obj) free_object(obj);
} else {
if (dt) free(dt);
if (dr) free(dr);
}
}
if (dst) {
if (st) free(st);
if (sr) free(sr);
}
return ret;
}
/**
 * LU decomposition of a matrix
 *   LU_decomp(ptr) → dst (dst != NULL)
 *   LU_decomp(ptr) → ptr (dst == NULL)
 *
 * @param ptr target matrix object
 * @param dst where the decomposed matrix is stored
 * @param piv where the permutation sequence is stored (NULL if not needed)
 *
 * @return error code (0 on success)
 *
 * @note The result combines the upper and lower triangular factors into a
 *       single matrix, laid out as follows:
 *         UUUUUU
 *         LUUUUU
 *         LLUUUU
 *         LLLUUU
 *         LLLLUU
 *         LLLLLU
 *       The diagonal elements of the lower triangular factor are all 1 in
 *       an LU decomposition and are therefore omitted.
 *
 * @note The permutation sequence is returned as destination indices.
 *       Conversion to a permutation matrix must be done by the caller.
 *
 * @refer http://thira.plavox.info/blog/2008/06/_c.html
 */
int
cmat_lu_decomp(cmat_t* ptr, cmat_t** dst, int* piv)
{
int ret;
cmat_t* obj;
int i;
float** row; // as "Source Row"
/*
* initialize
*/
ret = 0;
obj = NULL;
row = NULL;
/*
* argument check
*/
if (ptr == NULL) ret = CMAT_ERR_BADDR;
/*
* alloc result object
*/
if (!ret) {
if (dst) {
ret = alloc_object(ptr->rows, ptr->cols, ptr, &obj);
if (!ret) {
for (i = 0; i < ptr->rows; i++) {
#ifdef ENABLE_NEON
memcpy128(obj->row[i], ptr->row[i], sizeof(float) * ptr->cols);
#else /* defined(ENABLE_NEON) */
memcpy(obj->row[i], ptr->row[i], sizeof(float) * ptr->cols);
#endif /* defined(ENABLE_NEON) */
}
row = obj->row;
}
} else {
row = ptr->row;
}
}
/*
   * do LU decomposition
*/
if (!ret) {
lu_decomp(row, ptr->rows, ptr->coff, piv);
}
/*
* put return parameter
*/
if (!ret) {
if (dst) *dst = obj;
}
/*
* post process
*/
if (ret) {
if (dst) {
if (obj) free_object(obj);
}
}
return ret;
}
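/*
 * How the combined LU layout documented above can be consumed (illustrative
 * only, hence #if 0): solving A x = b splits into L y = P b (forward
 * substitution; the unit diagonal of L is omitted from the output) and
 * U x = y (back substitution). `row` and `piv` are assumed to come from
 * cmat_lu_decomp(); this sketch is not itself part of the library.
 */
#if 0
static void lu_solve(float **row, int *piv, int n, float *b, float *x)
{
  int i, j;
  for (i = 0; i < n; i++) {            /* forward: L y = P b */
    x[i] = b[piv[i]];                  /* piv[i] = original row now at position i */
    for (j = 0; j < i; j++) x[i] -= row[i][j] * x[j];
  }
  for (i = n - 1; i >= 0; i--) {       /* backward: U x = y */
    for (j = i + 1; j < n; j++) x[i] -= row[i][j] * x[j];
    x[i] /= row[i][i];
  }
}
#endif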
/**
 * compute the determinant
 *   det(ptr) → dst
 *
 * @param ptr target matrix object
 * @param dst pointer to where the result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_det(cmat_t* ptr, float* dst)
{
int ret;
float det;
/*
* initialize
*/
ret = 0;
det = -1.0;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* check shape
*/
if (!ret) {
if (ptr->rows != ptr->cols) ret = CMAT_ERR_SHAPE;
}
/*
* calc determinant
*/
if (!ret) {
switch (ptr->rows) {
case 1: // when 1x1
det = ptr->tbl[0];
break;
case 2: // when 2x2
det = calc_det_dim2(ptr->row[0], ptr->row[1]);
break;
case 3: // when 3x3
det = calc_det_dim3(ptr->row[0], ptr->row[1], ptr->row[2]);
break;
default: // when nxn
ret = calc_det(ptr->row, ptr->rows, ptr->coff, &det);
break;
}
}
/*
* put return parameter
*/
if (!ret) {
*dst = det;
}
return ret;
}
/**
 * compute the dot product of two matrices
 *   ptr * op → dst
 *
 * @param ptr target matrix object
 * @param op  operand matrix
 * @param dst pointer to where the result is stored
 *
 * @return error code (0 on success)
 */
int
cmat_dot(cmat_t* ptr, cmat_t* op, float* dst)
{
int ret;
float dot;
int i;
int n;
int r1;
int r2;
float* s;
float* o;
/*
* initialize
*/
ret = 0;
dot = 0.0;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (op == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* check shape
*/
if (!ret) {
if ((ptr->rows * ptr->cols) != (op->rows * op->cols)) ret = CMAT_ERR_SHAPE;
}
/*
* calc dot product
*/
if (!ret) {
n = ptr->rows * ptr->cols;
r1 = 0;
r2 = 0;
for (i = 0; i < n; i++) {
if (i % ptr->cols == 0) s = ptr->row[r1++];
if (i % op->cols == 0) o = op->row[r2++];
dot += s[i % ptr->cols] * o[i % op->cols];
}
}
/*
* put return parameter
*/
if (!ret) {
*dst = dot;
}
return ret;
}
#ifdef DEBUG
/**
 * get the maximum value
 *
 * @param ptr target matrix object
 * @param dst where the maximum is stored
 *
 * @return error code
 *
 * @note this function searches for the element with the largest absolute value
 */
int
cmat_abs_max(cmat_t* ptr, float* dst)
{
int ret;
float max;
int r;
int c;
float* row;
/*
* initialize
*/
ret = 0;
max = 0.0f;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* lookup maximum value
*/
if (!ret) {
for (r = 0; r < ptr->rows; r++) {
row = ptr->row[r];
for (c = 0; c < ptr->cols; c++) {
if (fabsf(row[c]) > fabsf(max)) max = row[c];
}
}
}
/*
* put return parameter
*/
if (!ret) {
*dst = max;
}
return ret;
}
/**
 * get the minimum value
 *
 * @param ptr target matrix object
 * @param dst where the minimum is stored
 *
 * @return error code
 *
 * @note this function searches for the element with the smallest absolute value
 */
int
cmat_abs_min(cmat_t* ptr, float* dst)
{
int ret;
float min;
int r;
int c;
float* row;
/*
* initialize
*/
ret = 0;
min = FLT_MAX;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* lookup minimum value
*/
if (!ret) {
if (ptr->rows > 0 && ptr->cols > 0) {
for (r = 0; r < ptr->rows; r++) {
row = ptr->row[r];
for (c = 0; c < ptr->cols; c++) {
if (fabsf(row[c]) < fabsf(min)) min = row[c];
}
}
} else {
min = 0.0f;
}
}
/*
* put return parameter
*/
if (!ret) {
*dst = min;
}
return ret;
}
/**
 * permute rows
 *
 * @param ptr  matrix object to permute
 * @param _piv permutation sequence
 *
 * @return error code
 *
 * @note this function modifies the calling object in place
 * @note the sequence returned by cmat_lu_decomp() can be used here directly
 */
int
cmat_permute_row(cmat_t* ptr, int* _piv)
{
int ret;
int r;
int* piv;
int i;
/*
* initialize
*/
ret = 0;
piv = NULL;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (_piv == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* alloc pivots array
*/
if (!ret) {
piv = (int*)malloc(sizeof(int) * ptr->rows);
if (piv == NULL) ret = CMAT_ERR_NOMEM;
}
/*
* check pivots
*/
if (!ret) {
memcpy(piv, _piv, sizeof(int) * ptr->rows);
sort(piv, ptr->rows);
for (i = 0; i < ptr->rows; i++) {
if (piv[i] != i) {
ret = CMAT_ERR_INVAL;
break;
}
}
}
/*
* do permutation row
*/
if (!ret) {
memcpy(piv, _piv, sizeof(int) * ptr->rows);
for (r = 0; r < ptr->rows; r++) {
for (i = r; i < ptr->rows; i++) {
if (piv[i] == r) break;
}
if (r != i) {
SWAP(ptr->row[r], ptr->row[i], float*);
SWAP(piv[r], piv[i], int);
}
}
}
/*
* post process
*/
if (piv) free(piv);
return ret;
}
/**
 * permute columns
 *
 * @param ptr  matrix object to permute
 * @param _piv permutation sequence
 *
 * @return error code
 *
 * @note this function modifies the calling object in place
 * @note the sequence returned by cmat_lu_decomp() can be used here directly
 */
int
cmat_permute_column(cmat_t* ptr, int* _piv)
{
int ret;
int r;
int* piv;
float* p;
int i;
int j;
/*
* initialize
*/
ret = 0;
piv = NULL;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (_piv == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
* alloc pivots array
*/
if (!ret) {
piv = (int*)malloc(sizeof(int) * ptr->cols);
if (piv == NULL) ret = CMAT_ERR_NOMEM;
}
/*
* check pivots
*/
if (!ret) {
memcpy(piv, _piv, sizeof(int) * ptr->cols);
    sort(piv, ptr->cols);   /* piv holds cols entries here, not rows */
for (i = 0; i < ptr->cols; i++) {
if (piv[i] != i) {
ret = CMAT_ERR_INVAL;
break;
}
}
}
/*
* do permutation column
*/
if (!ret) {
memcpy(piv, _piv, sizeof(int) * ptr->cols);
for (r = 0; r < ptr->rows; r++) {
for (i = r; i < ptr->cols; i++) {
if (piv[i] == r) break;
}
if (r != i) {
for (j = 0; j < ptr->rows; j++) {
SWAP(ptr->row[j][r], ptr->row[j][i], float);
}
SWAP(piv[r], piv[i], int);
}
}
}
/*
* post process
*/
if (piv) free(piv);
return ret;
}
#endif /* defined(DEBUG) */
/**
 * compare matrices
 *
 * @param ptr target matrix object
 * @param op  matrix object to compare against
 * @param dst pointer to where the result is stored (0 if equal)
 *
 * @return error code (0 on success)
 */
int
cmat_compare(cmat_t* ptr, cmat_t* op, int* dst)
{
int ret;
int res;
int r;
int c;
float* p;
float* o;
/*
* initialize
*/
ret = 0;
res = !0;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (op == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
   * compare matrices
*/
if (!ret) do {
/* check shape */
if (ptr->rows != op->rows || ptr->cols != op->cols) break;
/* check values */
for (r = 0; r < ptr->rows; r++) {
p = ptr->row[r];
o = op->row[r];
for (c = 0; c < ptr->cols; c++) {
if (fcmp(p[c], o[c], ptr->coff)) goto loop_out;
}
}
/* mark matched */
res = 0;
} while (0);
loop_out:
/*
* put return parameter
*/
if (!ret) {
*dst = res;
}
return ret;
}
/**
 * check matrix contents
 *
 * @param ptr target matrix object
 * @param val the expected matrix, flattened into a one-dimensional array
 * @param dst pointer to where the check result is stored (0 if equal)
 *
 * @return error code (0 on success)
 */
int
cmat_check(cmat_t* ptr, float* val, int* dst)
{
int ret;
int res;
int r;
int c;
float* p;
/*
* initialize
*/
ret = 0;
res = !0;
/*
* argument check
*/
do {
if (ptr == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (val == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
if (dst == NULL) {
ret = CMAT_ERR_BADDR;
break;
}
} while (0);
/*
   * compare values
*/
if (!ret) {
/* check values */
for (r = 0; r < ptr->rows; r++) {
p = ptr->row[r];
for (c = 0; c < ptr->cols; c++) {
if (fcmp(p[c], *val++, ptr->coff)) goto loop_out;
}
}
/* mark matched */
res = 0;
}
loop_out:
/*
* put return parameter
*/
if (!ret) {
*dst = res;
}
return ret;
}
/**
 * set the cutoff threshold
 *
 * @param ptr target matrix object
 * @param val threshold value
 *
 * @return error code (0 on success)
 */
int
cmat_set_cutoff_threshold(cmat_t* ptr, float val)
{
int ret;
/*
* initialize
*/
ret = 0;
/*
* check argument
*/
if (ptr == NULL) ret = CMAT_ERR_BADDR;
/*
* update context
*/
if (!ret) {
ptr->coff = fabsf(val);
}
return ret;
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
NodeID_ v;
WeightT_ w;
NodeWeight() {}
NodeWeight(NodeID_ v) : v(v), w(1) {}
NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
bool operator< (const NodeWeight& rhs) const {
return v == rhs.v ? w < rhs.w : v < rhs.v;
}
// doesn't check WeightT_s, needed to remove duplicate edges
bool operator== (const NodeWeight& rhs) const {
return v == rhs.v;
}
// doesn't check WeightT_s, needed to remove self edges
bool operator== (const NodeID_& rhs) const {
return v == rhs;
}
operator NodeID_() {
return v;
}
};
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
const NodeWeight<NodeID_, WeightT_>& nw) {
os << nw.v << " " << nw.w;
return os;
}
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
is >> nw.v >> nw.w;
return is;
}
// Syntactic sugar for an edge
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u;
DstT v;
EdgePair() {}
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
bool operator< (const EdgePair& rhs) const {
return u == rhs.u ? v < rhs.v : u < rhs.u;
}
bool operator== (const EdgePair& rhs) const {
return (u == rhs.u) && (v == rhs.v);
}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used for *non-negative* offsets within a neighborhood
typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
OffsetT start_offset_;
public:
Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) :
n_(n), g_index_(g_index), start_offset_(0) {
OffsetT max_offset = end() - begin();
start_offset_ = std::min(start_offset, max_offset);
}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_] + start_offset_; }
iterator end() { return g_index_[n_+1]; }
};
void ReleaseResources() {
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr)
delete[] in_index_;
if (in_neighbors_ != nullptr)
delete[] in_neighbors_;
}
}
public:
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
num_edges_ = out_index_[num_nodes_] - out_index_[0];
}
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
return Neighborhood(n, out_index_, start_offset);
}
Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_, start_offset);
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
private:
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
};
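//------------------------------------------------------------------------------
// Usage sketch (an illustrative addition, not part of the original header):
// walks every out-neighborhood once and tallies the stored edges, which
// should equal num_edges_directed(). The template parameter list mirrors the
// usual GAP declaration (NodeID_, DestID_ = NodeID_, MakeInverse) and is an
// assumption here, as is the helper's name.
template <class NodeID_, class DestID_, bool MakeInverse>
int64_t CountStoredEdges(const CSRGraph<NodeID_, DestID_, MakeInverse> &g) {
  int64_t touched = 0;
  for (NodeID_ u : g.vertices())          // Range over [0, num_nodes)
    for (DestID_ v : g.out_neigh(u)) {    // pointers g_index_[u] .. g_index_[u+1]
      (void) v;                           // each stored edge is visited once
      touched++;
    }
  return touched;                         // == g.num_edges_directed()
}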
#endif // GRAPH_H_
|
reg_detect.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is int, default size is 50. */
#include "reg_detect.h"
/* Array initialization. */
static
void init_array(int maxgrid,
DATA_TYPE POLYBENCH_2D(sum_tang,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(mean,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid))
{
int i __attribute__((annotate("scalar(range(0, " PB_XSTR(MAXGRID) ") final)")));
int j __attribute__((annotate("scalar(range(0, " PB_XSTR(MAXGRID) ") final)")));
for (i = 0; i < maxgrid; i++)
for (j = 0; j < maxgrid; j++) {
sum_tang[i][j] = (DATA_TYPE)((i+1)*(j+1));
mean[i][j] = ((DATA_TYPE) i-j) / maxgrid;
path[i][j] = ((DATA_TYPE) i*(j-1)) / maxgrid;
}
}
/* DCE code. Must scan the entire live-out data.
   It can also be used to check the correctness of the output. */
static
void print_array(int maxgrid,
DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid))
{
int i, j;
for (i = 0; i < maxgrid; i++)
for (j = 0; j < maxgrid; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, path[i][j]);
if ((i * maxgrid + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Source (modified): http://www.cs.uic.edu/~iluican/reg_detect.c */
static
void kernel_reg_detect(int niter, int maxgrid, int length,
DATA_TYPE POLYBENCH_2D(sum_tang,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(mean,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid),
DATA_TYPE POLYBENCH_3D(diff,MAXGRID,MAXGRID,LENGTH,maxgrid,maxgrid,length),
DATA_TYPE POLYBENCH_3D(sum_diff,MAXGRID,MAXGRID,LENGTH,maxgrid,maxgrid,length))
{
int t, i, j, cnt;
#pragma scop
  /* The time loop stays serial; each iteration repeats the whole pipeline,
     and parallelism is applied per loop nest inside it. (Wrapping the loop
     in "#pragma omp parallel" + "#pragma omp master" would serialize every
     inner "parallel for" unless nested parallelism is enabled.) */
  for (t = 0; t < _PB_NITER; t++)
{
    /* The i loop's lower bound depends on j (triangular nest), so the nest
       is not collapsible; the parallel loop runs over j alone. */
    #pragma omp parallel for private (i, cnt) schedule(static)
for (j = 0; j <= _PB_MAXGRID - 1; j++)
for (i = j; i <= _PB_MAXGRID - 1; i++)
for (cnt = 0; cnt <= _PB_LENGTH - 1; cnt++)
diff[j][i][cnt] = sum_tang[j][i];
    /* Triangular nest again: parallel over j only. */
    #pragma omp parallel for private (i, cnt) schedule(static)
for (j = 0; j <= _PB_MAXGRID - 1; j++)
{
for (i = j; i <= _PB_MAXGRID - 1; i++)
{
sum_diff[j][i][0] = diff[j][i][0];
for (cnt = 1; cnt <= _PB_LENGTH - 1; cnt++)
sum_diff[j][i][cnt] = sum_diff[j][i][cnt - 1] + diff[j][i][cnt];
mean[j][i] = sum_diff[j][i][_PB_LENGTH - 1];
}
}
#pragma omp parallel for
for (i = 0; i <= _PB_MAXGRID - 1; i++)
path[0][i] = mean[0][i];
    /* path[j][i] reads path[j-1][i-1], so the j loop carries a dependence
       and stays sequential; only the i loop is safe to parallelize. */
    for (j = 1; j <= _PB_MAXGRID - 1; j++) {
      #pragma omp parallel for schedule(static)
      for (i = j; i <= _PB_MAXGRID - 1; i++)
        path[j][i] = path[j - 1][i - 1] + mean[j][i];
    }
}
#pragma endscop
}
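/* A sequential reference sketch (not part of the original benchmark): the
   same recurrence as kernel_reg_detect above with no OpenMP pragmas, handy
   for validating the parallel version's output. It reuses the DATA_TYPE and
   POLYBENCH_* macros from the included headers; only the _seq name is new. */
static
void kernel_reg_detect_seq(int niter, int maxgrid, int length,
		DATA_TYPE POLYBENCH_2D(sum_tang,MAXGRID,MAXGRID,maxgrid,maxgrid),
		DATA_TYPE POLYBENCH_2D(mean,MAXGRID,MAXGRID,maxgrid,maxgrid),
		DATA_TYPE POLYBENCH_2D(path,MAXGRID,MAXGRID,maxgrid,maxgrid),
		DATA_TYPE POLYBENCH_3D(diff,MAXGRID,MAXGRID,LENGTH,maxgrid,maxgrid,length),
		DATA_TYPE POLYBENCH_3D(sum_diff,MAXGRID,MAXGRID,LENGTH,maxgrid,maxgrid,length))
{
  int t, i, j, cnt;
  for (t = 0; t < _PB_NITER; t++) {
    for (j = 0; j <= _PB_MAXGRID - 1; j++)
      for (i = j; i <= _PB_MAXGRID - 1; i++)
        for (cnt = 0; cnt <= _PB_LENGTH - 1; cnt++)
          diff[j][i][cnt] = sum_tang[j][i];
    for (j = 0; j <= _PB_MAXGRID - 1; j++)
      for (i = j; i <= _PB_MAXGRID - 1; i++) {
        sum_diff[j][i][0] = diff[j][i][0];
        for (cnt = 1; cnt <= _PB_LENGTH - 1; cnt++)
          sum_diff[j][i][cnt] = sum_diff[j][i][cnt - 1] + diff[j][i][cnt];
        mean[j][i] = sum_diff[j][i][_PB_LENGTH - 1];
      }
    for (i = 0; i <= _PB_MAXGRID - 1; i++)
      path[0][i] = mean[0][i];
    for (j = 1; j <= _PB_MAXGRID - 1; j++)
      for (i = j; i <= _PB_MAXGRID - 1; i++)
        path[j][i] = path[j - 1][i - 1] + mean[j][i];
  }
}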
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int niter = NITER;
int maxgrid = MAXGRID;
int length = LENGTH;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(sum_tang, DATA_TYPE __attribute__((annotate("target('sum_tang') scalar()"))), MAXGRID, MAXGRID, maxgrid, maxgrid);
POLYBENCH_2D_ARRAY_DECL(mean, DATA_TYPE __attribute__((annotate("target('mean') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, maxgrid, maxgrid);
POLYBENCH_2D_ARRAY_DECL(path, DATA_TYPE __attribute__((annotate("target('path') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, maxgrid, maxgrid);
POLYBENCH_3D_ARRAY_DECL(diff, DATA_TYPE __attribute__((annotate("target('diff') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, LENGTH, maxgrid, maxgrid, length);
POLYBENCH_3D_ARRAY_DECL(sum_diff, DATA_TYPE __attribute__((annotate("target('sum_diff') scalar(range(0, 5000) final)"))), MAXGRID, MAXGRID, LENGTH, maxgrid, maxgrid, length);
/* Initialize array(s). */
init_array (maxgrid,
POLYBENCH_ARRAY(sum_tang),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(path));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_reg_detect (niter, maxgrid, length,
POLYBENCH_ARRAY(sum_tang),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(path),
POLYBENCH_ARRAY(diff),
POLYBENCH_ARRAY(sum_diff));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(maxgrid, POLYBENCH_ARRAY(path)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(sum_tang);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(path);
POLYBENCH_FREE_ARRAY(diff);
POLYBENCH_FREE_ARRAY(sum_diff);
return 0;
}
|
choleskies_cython.c | /* Generated by Cython 0.22 */
#define PY_SSIZE_T_CLEAN
#ifndef CYTHON_USE_PYLONG_INTERNALS
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 0
#else
#include "pyconfig.h"
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 1
#else
#define CYTHON_USE_PYLONG_INTERNALS 0
#endif
#endif
#endif
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_22"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#define __Pyx_PyFrozenSet_Size(s) PyObject_Size(s)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#define __Pyx_PyFrozenSet_Size(s) PySet_Size(s)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
/* Initialize NaN. The sign is irrelevant; an exponent with all bits set and
   a nonzero mantissa means NaN. If the first mantissa bit is 1, the value
   is a quiet NaN. */
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
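/* Worked illustration (an added note, not Cython output): the 0xFF fill
   above produces the single-precision bit pattern 0xFFFFFFFF, i.e.
   sign = 1, exponent = 0xFF (all ones), mantissa = 0x7FFFFF (nonzero,
   leading bit set), which IEEE-754 decodes as a quiet NaN, so
   isnan(__PYX_NAN()) holds on IEEE-754 targets. */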
#define __Pyx_void_to_None(void_result) (void_result, Py_INCREF(Py_None), Py_None)
#ifdef __cplusplus
template<typename T>
void __Pyx_call_destructor(T* x) {
x->~T();
}
template<typename T>
class __Pyx_FakeReference {
public:
__Pyx_FakeReference() : ptr(NULL) { }
__Pyx_FakeReference(T& ref) : ptr(&ref) { }
T *operator->() { return ptr; }
operator T&() { return *ptr; }
private:
T *ptr;
};
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__GPy__util__choleskies_cython
#define __PYX_HAVE_API__GPy__util__choleskies_cython
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "pythread.h"
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
(sizeof(type) < sizeof(Py_ssize_t)) || \
(sizeof(type) > sizeof(Py_ssize_t) && \
likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX) && \
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
v == (type)PY_SSIZE_T_MIN))) || \
(sizeof(type) == sizeof(Py_ssize_t) && \
(is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX))) )
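/* Illustration (an added note, not Cython output): on an LP64 platform a
   32-bit "type" satisfies the first clause unconditionally; a "type" wider
   than Py_ssize_t must satisfy v <= PY_SSIZE_T_MAX (and v >= PY_SSIZE_T_MIN
   if signed); a same-width unsigned "type" must satisfy v <= PY_SSIZE_T_MAX
   before the value may be used as a Py_ssize_t. */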
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);  /* +1 for the NUL copied by strcpy below */
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"GPy/util/choleskies_cython.pyx",
"__init__.pxd",
"GPy/util/stringsource",
"type.pxd",
};
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER)
#include <Windows.h>
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using MSVC atomics"
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview) \
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview) \
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
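/* Note (an added comment): every memoryview slice handed out bumps an
   acquisition count on its owning memoryview object. With CYTHON_ATOMICS
   the increment/decrement are lock-free; otherwise they fall back to the
   *_locked helpers declared further down, which take memview->lock. */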
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":729
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":736
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":741
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* "scipy/linalg/cython_blas.pxd":15
* # The original libraries should be linked directly.
*
* ctypedef float s # <<<<<<<<<<<<<<
* ctypedef double d
* ctypedef float complex c
*/
typedef float __pyx_t_5scipy_6linalg_11cython_blas_s;
/* "scipy/linalg/cython_blas.pxd":16
*
* ctypedef float s
* ctypedef double d # <<<<<<<<<<<<<<
* ctypedef float complex c
* ctypedef double complex z
*/
typedef double __pyx_t_5scipy_6linalg_11cython_blas_d;
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":767
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "View.MemoryView":99
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":269
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":302
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":921
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":302
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":921
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
if (acquire_gil) { \
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name);
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
#define __Pyx_BUF_MAX_NDIMS 8  /* matches the fixed-size shape/strides/diminfo arrays in this file */
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE long __Pyx_div_long(long, long); /* proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback);
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
#if PY_MAJOR_VERSION >= 3
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
PyObject* args = PyTuple_Pack(1, key);
if (likely(args))
PyErr_SetObject(PyExc_KeyError, args);
Py_XDECREF(args);
}
return NULL;
}
Py_INCREF(value);
return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
#include <string.h>
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* proto */
#define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb);
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb);
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
Py_SIZE(list) = len+1;
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
typedef struct {
int code_line;
PyCodeObject* code_object;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
static PyObject *__pyx_memview_get_double(const char *itemp);
static int __pyx_memview_set_double(const char *itemp, PyObject *obj);
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
char order, int ndim);
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *);
static int __Pyx_check_binary_version(void);
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
static PyObject *__Pyx_ImportModule(const char *name);
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig);
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'scipy.linalg.cython_blas' */
static __pyx_t_5scipy_6linalg_11cython_blas_d (*__pyx_f_5scipy_6linalg_11cython_blas_ddot)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/
static void (*__pyx_f_5scipy_6linalg_11cython_blas_dscal)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/
static void (*__pyx_f_5scipy_6linalg_11cython_blas_dsymv)(char *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/
/* Module declarations from 'GPy.util.choleskies_cython' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static void __pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(int, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "GPy.util.choleskies_cython"
int __pyx_module_is_main_GPy__util__choleskies_cython = 0;
/* Implementation of 'GPy.util.choleskies_cython' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_xrange;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_flat, int __pyx_v_M); /* proto */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_L); /* proto */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static char __pyx_k_B[] = "B";
static char __pyx_k_D[] = "D";
static char __pyx_k_H[] = "H";
static char __pyx_k_I[] = "I";
static char __pyx_k_L[] = "L";
static char __pyx_k_M[] = "M";
static char __pyx_k_N[] = "N";
static char __pyx_k_O[] = "O";
static char __pyx_k_Q[] = "Q";
static char __pyx_k_b[] = "b";
static char __pyx_k_c[] = "c";
static char __pyx_k_d[] = "d";
static char __pyx_k_f[] = "f";
static char __pyx_k_g[] = "g";
static char __pyx_k_h[] = "h";
static char __pyx_k_i[] = "i";
static char __pyx_k_j[] = "j";
static char __pyx_k_k[] = "k";
static char __pyx_k_l[] = "l";
static char __pyx_k_m[] = "m";
static char __pyx_k_q[] = "q";
static char __pyx_k_u[] = "u";
static char __pyx_k_Zd[] = "Zd";
static char __pyx_k_Zf[] = "Zf";
static char __pyx_k_Zg[] = "Zg";
static char __pyx_k_dL[] = "dL";
static char __pyx_k_id[] = "id";
static char __pyx_k_mm[] = "mm";
static char __pyx_k_np[] = "np";
static char __pyx_k_obj[] = "obj";
static char __pyx_k_ret[] = "ret";
static char __pyx_k_base[] = "base";
static char __pyx_k_flat[] = "flat";
static char __pyx_k_main[] = "__main__";
static char __pyx_k_mode[] = "mode";
static char __pyx_k_name[] = "name";
static char __pyx_k_ndim[] = "ndim";
static char __pyx_k_pack[] = "pack";
static char __pyx_k_size[] = "size";
static char __pyx_k_step[] = "step";
static char __pyx_k_stop[] = "stop";
static char __pyx_k_test[] = "__test__";
static char __pyx_k_tril[] = "tril";
static char __pyx_k_class[] = "__class__";
static char __pyx_k_count[] = "count";
static char __pyx_k_dL_dK[] = "dL_dK";
static char __pyx_k_empty[] = "empty";
static char __pyx_k_error[] = "error";
static char __pyx_k_flags[] = "flags";
static char __pyx_k_numpy[] = "numpy";
static char __pyx_k_range[] = "range";
static char __pyx_k_shape[] = "shape";
static char __pyx_k_start[] = "start";
static char __pyx_k_zeros[] = "zeros";
static char __pyx_k_L_cont[] = "L_cont";
static char __pyx_k_format[] = "format";
static char __pyx_k_import[] = "__import__";
static char __pyx_k_name_2[] = "__name__";
static char __pyx_k_struct[] = "struct";
static char __pyx_k_unpack[] = "unpack";
static char __pyx_k_xrange[] = "xrange";
static char __pyx_k_asarray[] = "asarray";
static char __pyx_k_fortran[] = "fortran";
static char __pyx_k_memview[] = "memview";
static char __pyx_k_Ellipsis[] = "Ellipsis";
static char __pyx_k_itemsize[] = "itemsize";
static char __pyx_k_TypeError[] = "TypeError";
static char __pyx_k_enumerate[] = "enumerate";
static char __pyx_k_IndexError[] = "IndexError";
static char __pyx_k_ValueError[] = "ValueError";
static char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static char __pyx_k_MemoryError[] = "MemoryError";
static char __pyx_k_RuntimeError[] = "RuntimeError";
static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static char __pyx_k_flat_to_triang[] = "flat_to_triang";
static char __pyx_k_triang_to_flat[] = "triang_to_flat";
static char __pyx_k_allocate_buffer[] = "allocate_buffer";
static char __pyx_k_dtype_is_object[] = "dtype_is_object";
static char __pyx_k_ascontiguousarray[] = "ascontiguousarray";
static char __pyx_k_backprop_gradient[] = "backprop_gradient";
static char __pyx_k_strided_and_direct[] = "<strided and direct>";
static char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static char __pyx_k_backprop_gradient_par[] = "backprop_gradient_par";
static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static char __pyx_k_backprop_gradient_par_c[] = "backprop_gradient_par_c";
static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)";
static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct";
static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static char __pyx_k_GPy_util_choleskies_cython[] = "GPy.util.choleskies_cython";
static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)";
static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)";
static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static char __pyx_k_home_james_work_GPy_GPy_util_ch[] = "/home/james/work/GPy/GPy/util/choleskies_cython.pyx";
static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced";
static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions";
static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_D;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_n_s_GPy_util_choleskies_cython;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_L;
static PyObject *__pyx_n_s_L_cont;
static PyObject *__pyx_n_s_M;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_s_N;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_asarray;
static PyObject *__pyx_n_s_ascontiguousarray;
static PyObject *__pyx_n_s_backprop_gradient;
static PyObject *__pyx_n_s_backprop_gradient_par;
static PyObject *__pyx_n_s_backprop_gradient_par_c;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_count;
static PyObject *__pyx_n_s_d;
static PyObject *__pyx_n_s_dL;
static PyObject *__pyx_n_s_dL_dK;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_empty;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_flat;
static PyObject *__pyx_n_s_flat_to_triang;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_kp_s_home_james_work_GPy_GPy_util_ch;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_j;
static PyObject *__pyx_n_s_k;
static PyObject *__pyx_n_s_m;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mm;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_ret;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_triang_to_flat;
static PyObject *__pyx_n_s_tril;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_xrange;
static PyObject *__pyx_n_s_zeros;
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__15;
static PyObject *__pyx_slice__16;
static PyObject *__pyx_slice__17;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_tuple__27;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_tuple__32;
static PyObject *__pyx_tuple__33;
static PyObject *__pyx_codeobj__20;
static PyObject *__pyx_codeobj__22;
static PyObject *__pyx_codeobj__24;
static PyObject *__pyx_codeobj__26;
static PyObject *__pyx_codeobj__28;
/* "GPy/util/choleskies_cython.pyx":12
* cimport scipy.linalg.cython_blas as cblas
*
* def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<<
* """take a matrix N x D and return a D X M x M array where
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_3GPy_4util_17choleskies_cython_flat_to_triang[] = "take a matrix N x D and return a D x M x M array where\n\n N = M(M+1)/2\n\n the lower triangular portion of the d'th slice of the result is filled by the d'th column of flat.\n ";
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_1flat_to_triang = {"flat_to_triang", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang, METH_VARARGS|METH_KEYWORDS, __pyx_doc_3GPy_4util_17choleskies_cython_flat_to_triang};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_flat = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_M;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("flat_to_triang (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_flat,&__pyx_n_s_M,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flat)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("flat_to_triang", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flat_to_triang") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_flat = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_flat.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_M = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("flat_to_triang", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.util.choleskies_cython.flat_to_triang", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(__pyx_self, __pyx_v_flat, __pyx_v_M);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_flat, int __pyx_v_M) {
int __pyx_v_D;
CYTHON_UNUSED int __pyx_v_N;
int __pyx_v_count;
__Pyx_memviewslice __pyx_v_ret = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_d;
int __pyx_v_m;
int __pyx_v_mm;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
long __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("flat_to_triang", 0);
/* "GPy/util/choleskies_cython.pyx":19
 * the lower triangular portion of the d'th slice of the result is filled by the d'th column of flat.
* """
* cdef int D = flat.shape[1] # <<<<<<<<<<<<<<
* cdef int N = flat.shape[0]
* cdef int count = 0
*/
__pyx_v_D = (__pyx_v_flat.shape[1]);
/* "GPy/util/choleskies_cython.pyx":20
* """
* cdef int D = flat.shape[1]
* cdef int N = flat.shape[0] # <<<<<<<<<<<<<<
* cdef int count = 0
* cdef double[:, :, ::1] ret = np.zeros((D, M, M))
*/
__pyx_v_N = (__pyx_v_flat.shape[0]);
/* "GPy/util/choleskies_cython.pyx":21
* cdef int D = flat.shape[1]
* cdef int N = flat.shape[0]
* cdef int count = 0 # <<<<<<<<<<<<<<
* cdef double[:, :, ::1] ret = np.zeros((D, M, M))
* cdef int d, m, mm
*/
__pyx_v_count = 0;
/* "GPy/util/choleskies_cython.pyx":22
* cdef int N = flat.shape[0]
* cdef int count = 0
* cdef double[:, :, ::1] ret = np.zeros((D, M, M)) # <<<<<<<<<<<<<<
* cdef int d, m, mm
* with nogil:
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_5 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_5) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL;
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_7.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_ret = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "GPy/util/choleskies_cython.pyx":24
* cdef double[:, :, ::1] ret = np.zeros((D, M, M))
* cdef int d, m, mm
* with nogil: # <<<<<<<<<<<<<<
* for d in range(D):
* count = 0
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
/* "GPy/util/choleskies_cython.pyx":25
* cdef int d, m, mm
* with nogil:
* for d in range(D): # <<<<<<<<<<<<<<
* count = 0
* for m in range(M):
*/
__pyx_t_8 = __pyx_v_D;
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
__pyx_v_d = __pyx_t_9;
/* "GPy/util/choleskies_cython.pyx":26
* with nogil:
* for d in range(D):
* count = 0 # <<<<<<<<<<<<<<
* for m in range(M):
* for mm in range(m+1):
*/
__pyx_v_count = 0;
/* "GPy/util/choleskies_cython.pyx":27
* for d in range(D):
* count = 0
* for m in range(M): # <<<<<<<<<<<<<<
* for mm in range(m+1):
* ret[d, m, mm] = flat[count,d]
*/
__pyx_t_10 = __pyx_v_M;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_m = __pyx_t_11;
/* "GPy/util/choleskies_cython.pyx":28
* count = 0
* for m in range(M):
* for mm in range(m+1): # <<<<<<<<<<<<<<
* ret[d, m, mm] = flat[count,d]
* count += 1
*/
__pyx_t_12 = (__pyx_v_m + 1);
for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
__pyx_v_mm = __pyx_t_13;
/* "GPy/util/choleskies_cython.pyx":29
* for m in range(M):
* for mm in range(m+1):
* ret[d, m, mm] = flat[count,d] # <<<<<<<<<<<<<<
* count += 1
* return ret
*/
__pyx_t_14 = __pyx_v_count;
__pyx_t_15 = __pyx_v_d;
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_flat.shape[0];
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_flat.shape[1];
__pyx_t_16 = __pyx_v_d;
__pyx_t_17 = __pyx_v_m;
__pyx_t_18 = __pyx_v_mm;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_ret.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_ret.shape[1];
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_ret.shape[2];
*((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_ret.data + __pyx_t_16 * __pyx_v_ret.strides[0]) ) + __pyx_t_17 * __pyx_v_ret.strides[1]) )) + __pyx_t_18)) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_flat.data + __pyx_t_14 * __pyx_v_flat.strides[0]) ) + __pyx_t_15 * __pyx_v_flat.strides[1]) )));
/* "GPy/util/choleskies_cython.pyx":30
* for mm in range(m+1):
* ret[d, m, mm] = flat[count,d]
* count += 1 # <<<<<<<<<<<<<<
* return ret
*
*/
__pyx_v_count = (__pyx_v_count + 1);
}
}
}
}
/* "GPy/util/choleskies_cython.pyx":24
* cdef double[:, :, ::1] ret = np.zeros((D, M, M))
* cdef int d, m, mm
* with nogil: # <<<<<<<<<<<<<<
* for d in range(D):
* count = 0
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/util/choleskies_cython.pyx":31
* ret[d, m, mm] = flat[count,d]
* count += 1
* return ret # <<<<<<<<<<<<<<
*
* def triang_to_flat(double[:, :, :] L):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_ret, 3, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "GPy/util/choleskies_cython.pyx":12
* cimport scipy.linalg.cython_blas as cblas
*
* def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<<
* """take a matrix N x D and return a D X M x M array where
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
__Pyx_AddTraceback("GPy.util.choleskies_cython.flat_to_triang", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_ret, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_flat, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
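/* Reference sketch (illustrative only, not part of the generated module):
 * the loop nest above packs column d of `flat` into the lower triangle of
 * slice d of the result. A minimal NumPy equivalent of the same algorithm,
 * where the helper name flat_to_triang_ref is hypothetical:
 *
 *     import numpy as np
 *
 *     def flat_to_triang_ref(flat, M):
 *         # flat has shape (N, D) with N = M*(M+1)/2
 *         N, D = flat.shape
 *         ret = np.zeros((D, M, M))
 *         for d in range(D):
 *             count = 0
 *             for m in range(M):           # row of the triangle
 *                 for mm in range(m + 1):  # columns 0..m of row m
 *                     ret[d, m, mm] = flat[count, d]
 *                     count += 1
 *         return ret
 */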
/* "GPy/util/choleskies_cython.pyx":33
* return ret
*
* def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<<
* cdef int D = L.shape[0]
* cdef int M = L.shape[1]
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat(PyObject *__pyx_self, PyObject *__pyx_arg_L); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_3triang_to_flat = {"triang_to_flat", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat, METH_O, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat(PyObject *__pyx_self, PyObject *__pyx_arg_L) {
__Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("triang_to_flat (wrapper)", 0);
assert(__pyx_arg_L); {
__pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_arg_L); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.util.choleskies_cython.triang_to_flat", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(__pyx_self, __pyx_v_L);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_L) {
int __pyx_v_D;
int __pyx_v_M;
int __pyx_v_N;
int __pyx_v_count;
__Pyx_memviewslice __pyx_v_flat = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_d;
int __pyx_v_m;
int __pyx_v_mm;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
long __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("triang_to_flat", 0);
/* "GPy/util/choleskies_cython.pyx":34
*
* def triang_to_flat(double[:, :, :] L):
* cdef int D = L.shape[0] # <<<<<<<<<<<<<<
* cdef int M = L.shape[1]
* cdef int N = M*(M+1)/2
*/
__pyx_v_D = (__pyx_v_L.shape[0]);
/* "GPy/util/choleskies_cython.pyx":35
* def triang_to_flat(double[:, :, :] L):
* cdef int D = L.shape[0]
* cdef int M = L.shape[1] # <<<<<<<<<<<<<<
* cdef int N = M*(M+1)/2
* cdef int count = 0
*/
__pyx_v_M = (__pyx_v_L.shape[1]);
/* "GPy/util/choleskies_cython.pyx":36
* cdef int D = L.shape[0]
* cdef int M = L.shape[1]
* cdef int N = M*(M+1)/2 # <<<<<<<<<<<<<<
* cdef int count = 0
* cdef double[:, ::1] flat = np.empty((N, D))
*/
__pyx_v_N = __Pyx_div_long((__pyx_v_M * (__pyx_v_M + 1)), 2);
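  /* N = M*(M+1)/2 is the number of entries in an M x M lower triangle
     (diagonal included); e.g. M = 3 gives N = 6: three diagonal plus
     three strictly-lower entries. */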
/* "GPy/util/choleskies_cython.pyx":37
* cdef int M = L.shape[1]
* cdef int N = M*(M+1)/2
* cdef int count = 0 # <<<<<<<<<<<<<<
* cdef double[:, ::1] flat = np.empty((N, D))
* cdef int d, m, mm
*/
__pyx_v_count = 0;
/* "GPy/util/choleskies_cython.pyx":38
* cdef int N = M*(M+1)/2
* cdef int count = 0
* cdef double[:, ::1] flat = np.empty((N, D)) # <<<<<<<<<<<<<<
* cdef int d, m, mm
* with nogil:
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 38; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_flat = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":40
* cdef double[:, ::1] flat = np.empty((N, D))
* cdef int d, m, mm
* with nogil: # <<<<<<<<<<<<<<
* for d in range(D):
* count = 0
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
/* "GPy/util/choleskies_cython.pyx":41
* cdef int d, m, mm
* with nogil:
* for d in range(D): # <<<<<<<<<<<<<<
* count = 0
* for m in range(M):
*/
__pyx_t_7 = __pyx_v_D;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_d = __pyx_t_8;
/* "GPy/util/choleskies_cython.pyx":42
* with nogil:
* for d in range(D):
* count = 0 # <<<<<<<<<<<<<<
* for m in range(M):
* for mm in range(m+1):
*/
__pyx_v_count = 0;
/* "GPy/util/choleskies_cython.pyx":43
* for d in range(D):
* count = 0
* for m in range(M): # <<<<<<<<<<<<<<
* for mm in range(m+1):
* flat[count,d] = L[d, m, mm]
*/
__pyx_t_9 = __pyx_v_M;
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
__pyx_v_m = __pyx_t_10;
/* "GPy/util/choleskies_cython.pyx":44
* count = 0
* for m in range(M):
* for mm in range(m+1): # <<<<<<<<<<<<<<
* flat[count,d] = L[d, m, mm]
* count += 1
*/
__pyx_t_11 = (__pyx_v_m + 1);
for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
__pyx_v_mm = __pyx_t_12;
/* "GPy/util/choleskies_cython.pyx":45
* for m in range(M):
* for mm in range(m+1):
* flat[count,d] = L[d, m, mm] # <<<<<<<<<<<<<<
* count += 1
* return flat
*/
__pyx_t_13 = __pyx_v_d;
__pyx_t_14 = __pyx_v_m;
__pyx_t_15 = __pyx_v_mm;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_L.shape[0];
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_L.shape[1];
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_L.shape[2];
__pyx_t_16 = __pyx_v_count;
__pyx_t_17 = __pyx_v_d;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_flat.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_flat.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_flat.data + __pyx_t_16 * __pyx_v_flat.strides[0]) )) + __pyx_t_17)) )) = (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_13 * __pyx_v_L.strides[0]) ) + __pyx_t_14 * __pyx_v_L.strides[1]) ) + __pyx_t_15 * __pyx_v_L.strides[2]) )));
/* "GPy/util/choleskies_cython.pyx":46
* for mm in range(m+1):
* flat[count,d] = L[d, m, mm]
* count += 1 # <<<<<<<<<<<<<<
* return flat
*
*/
__pyx_v_count = (__pyx_v_count + 1);
}
}
}
}
/* "GPy/util/choleskies_cython.pyx":40
* cdef double[:, ::1] flat = np.empty((N, D))
* cdef int d, m, mm
* with nogil: # <<<<<<<<<<<<<<
* for d in range(D):
* count = 0
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/util/choleskies_cython.pyx":47
* flat[count,d] = L[d, m, mm]
* count += 1
* return flat # <<<<<<<<<<<<<<
*
* def backprop_gradient(double[:, :] dL, double[:, :] L):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_flat, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "GPy/util/choleskies_cython.pyx":33
* return ret
*
* def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<<
* cdef int D = L.shape[0]
* cdef int M = L.shape[1]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("GPy.util.choleskies_cython.triang_to_flat", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_L, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_flat, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
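/* Reference sketch (illustrative only): triang_to_flat is the inverse
 * packing of flat_to_triang. A minimal NumPy equivalent, where the name
 * triang_to_flat_ref is hypothetical:
 *
 *     import numpy as np
 *
 *     def triang_to_flat_ref(L):
 *         # L has shape (D, M, M); returns (N, D) with N = M*(M+1)/2
 *         D, M = L.shape[0], L.shape[1]
 *         N = M * (M + 1) // 2
 *         flat = np.empty((N, D))
 *         for d in range(D):
 *             count = 0
 *             for m in range(M):
 *                 for mm in range(m + 1):
 *                     flat[count, d] = L[d, m, mm]
 *                     count += 1
 *         return flat
 *
 * Since both functions walk the lower triangle in the same order,
 * triang_to_flat_ref(flat_to_triang_ref(flat, M)) recovers flat.
 */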
/* "GPy/util/choleskies_cython.pyx":49
* return flat
*
* def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_5backprop_gradient = {"backprop_gradient", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("backprop_gradient (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("backprop_gradient", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_dL.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("backprop_gradient", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(__pyx_self, __pyx_v_dL, __pyx_v_L);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
__Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_N;
int __pyx_v_k;
int __pyx_v_j;
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("backprop_gradient", 0);
/* "GPy/util/choleskies_cython.pyx":50
*
* def backprop_gradient(double[:, :] dL, double[:, :] L):
* cdef double[:, ::1] dL_dK = np.tril(dL) # <<<<<<<<<<<<<<
* cdef int N = L.shape[0]
* cdef int k, j, i
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_dL_dK = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":51
* def backprop_gradient(double[:, :] dL, double[:, :] L):
* cdef double[:, ::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0] # <<<<<<<<<<<<<<
* cdef int k, j, i
* with nogil:
*/
__pyx_v_N = (__pyx_v_L.shape[0]);
/* "GPy/util/choleskies_cython.pyx":53
* cdef int N = L.shape[0]
* cdef int k, j, i
* with nogil: # <<<<<<<<<<<<<<
* for k in range(N - 1, -1, -1):
* for j in range(k + 1, N):
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
/* "GPy/util/choleskies_cython.pyx":54
* cdef int k, j, i
* with nogil:
* for k in range(N - 1, -1, -1): # <<<<<<<<<<<<<<
* for j in range(k + 1, N):
* for i in range(j, N):
*/
for (__pyx_t_7 = (__pyx_v_N - 1); __pyx_t_7 > -1; __pyx_t_7-=1) {
__pyx_v_k = __pyx_t_7;
/* "GPy/util/choleskies_cython.pyx":55
* with nogil:
* for k in range(N - 1, -1, -1):
* for j in range(k + 1, N): # <<<<<<<<<<<<<<
* for i in range(j, N):
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
*/
__pyx_t_8 = __pyx_v_N;
for (__pyx_t_9 = (__pyx_v_k + 1); __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
__pyx_v_j = __pyx_t_9;
/* "GPy/util/choleskies_cython.pyx":56
* for k in range(N - 1, -1, -1):
* for j in range(k + 1, N):
* for i in range(j, N): # <<<<<<<<<<<<<<
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
* dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
*/
__pyx_t_10 = __pyx_v_N;
for (__pyx_t_11 = __pyx_v_j; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "GPy/util/choleskies_cython.pyx":57
* for j in range(k + 1, N):
* for i in range(j, N):
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k] # <<<<<<<<<<<<<<
* dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
* for j in range(k + 1, N):
*/
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_j;
if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_dL_dK.shape[1];
__pyx_t_14 = __pyx_v_j;
__pyx_t_15 = __pyx_v_k;
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_L.shape[0];
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_L.shape[1];
__pyx_t_16 = __pyx_v_i;
__pyx_t_17 = __pyx_v_k;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_16 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_17)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_12 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_13)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_14 * __pyx_v_L.strides[0]) ) + __pyx_t_15 * __pyx_v_L.strides[1]) ))));
/* "GPy/util/choleskies_cython.pyx":58
* for i in range(j, N):
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
* dL_dK[j, k] -= dL_dK[i, j] * L[i, k] # <<<<<<<<<<<<<<
* for j in range(k + 1, N):
* dL_dK[j, k] /= L[k, k]
*/
__pyx_t_18 = __pyx_v_i;
__pyx_t_19 = __pyx_v_j;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL_dK.shape[1];
__pyx_t_20 = __pyx_v_i;
__pyx_t_21 = __pyx_v_k;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_L.shape[0];
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_L.shape[1];
__pyx_t_22 = __pyx_v_j;
__pyx_t_23 = __pyx_v_k;
if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_22 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_23)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_18 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_19)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_20 * __pyx_v_L.strides[0]) ) + __pyx_t_21 * __pyx_v_L.strides[1]) ))));
}
}
/* "GPy/util/choleskies_cython.pyx":59
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
* dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
* for j in range(k + 1, N): # <<<<<<<<<<<<<<
* dL_dK[j, k] /= L[k, k]
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
*/
__pyx_t_8 = __pyx_v_N;
for (__pyx_t_9 = (__pyx_v_k + 1); __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
__pyx_v_j = __pyx_t_9;
/* "GPy/util/choleskies_cython.pyx":60
* dL_dK[j, k] -= dL_dK[i, j] * L[i, k]
* for j in range(k + 1, N):
* dL_dK[j, k] /= L[k, k] # <<<<<<<<<<<<<<
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
* dL_dK[k, k] /= (2. * L[k, k])
*/
__pyx_t_10 = __pyx_v_k;
__pyx_t_11 = __pyx_v_k;
if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_v_L.shape[0];
if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_v_L.shape[1];
__pyx_t_24 = __pyx_v_j;
__pyx_t_25 = __pyx_v_k;
if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_24 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_25)) )) /= (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_10 * __pyx_v_L.strides[0]) ) + __pyx_t_11 * __pyx_v_L.strides[1]) )));
/* "GPy/util/choleskies_cython.pyx":61
* for j in range(k + 1, N):
* dL_dK[j, k] /= L[k, k]
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k] # <<<<<<<<<<<<<<
* dL_dK[k, k] /= (2. * L[k, k])
* return dL_dK
*/
__pyx_t_26 = __pyx_v_j;
__pyx_t_27 = __pyx_v_k;
if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_L.shape[0];
if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_L.shape[1];
__pyx_t_28 = __pyx_v_j;
__pyx_t_29 = __pyx_v_k;
if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_dL_dK.shape[1];
__pyx_t_30 = __pyx_v_k;
__pyx_t_31 = __pyx_v_k;
if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_30 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_31)) )) -= ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_26 * __pyx_v_L.strides[0]) ) + __pyx_t_27 * __pyx_v_L.strides[1]) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_28 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_29)) ))));
}
/* "GPy/util/choleskies_cython.pyx":62
* dL_dK[j, k] /= L[k, k]
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
* dL_dK[k, k] /= (2. * L[k, k]) # <<<<<<<<<<<<<<
* return dL_dK
*
*/
__pyx_t_8 = __pyx_v_k;
__pyx_t_9 = __pyx_v_k;
if (__pyx_t_8 < 0) __pyx_t_8 += __pyx_v_L.shape[0];
if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[1];
__pyx_t_32 = __pyx_v_k;
__pyx_t_33 = __pyx_v_k;
if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_32 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_33)) )) /= (2. * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_8 * __pyx_v_L.strides[0]) ) + __pyx_t_9 * __pyx_v_L.strides[1]) ))));
}
}
/* "GPy/util/choleskies_cython.pyx":53
* cdef int N = L.shape[0]
* cdef int k, j, i
* with nogil: # <<<<<<<<<<<<<<
* for k in range(N - 1, -1, -1):
* for j in range(k + 1, N):
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
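/* End of the `with nogil:` block from the .pyx source: the
 * Py_UNBLOCK_THREADS / Py_BLOCK_THREADS pair releases the GIL around the
 * pure-C gradient loops and reacquires it afterwards, so other Python
 * threads can run while the O(N^3) recurrence executes. */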
/* "GPy/util/choleskies_cython.pyx":63
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
* dL_dK[k, k] /= (2. * L[k, k])
* return dL_dK # <<<<<<<<<<<<<<
*
* def backprop_gradient_par(double[:,:] dL, double[:,:] L):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 63; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "GPy/util/choleskies_cython.pyx":49
* return flat
*
* def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_L, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
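/* What backprop_gradient computes, stated plainly: given the Cholesky
 * factor L of K = L*L^T and the gradient dL = df/dL of some scalar f, the
 * loops above rewrite tril(dL) in place, one column k = N-1 .. 0 at a
 * time, into dK = df/dK. The closing division by 2*L[k,k] is the
 * square-root rule: L[k,k] enters as L[k,k] = sqrt(K[k,k] - ...), so
 * dL[k,k]/dK[k,k] = 1/(2*L[k,k]).
 *
 * A minimal sketch of the same recurrence on plain row-major C arrays,
 * kept for reference only; the function name and layout are illustrative
 * and nothing in the generated module calls it. On entry dK must hold
 * tril(dL); on exit it holds the gradient with respect to K. */
static void reference_chol_backprop_serial(int N, const double *L, double *dK)
{
    for (int k = N - 1; k >= 0; k--) {
        /* fold the contributions of columns j > k back into column k */
        for (int j = k + 1; j < N; j++) {
            for (int i = j; i < N; i++) {
                dK[i*N + k] -= dK[i*N + j] * L[j*N + k];
                dK[j*N + k] -= dK[i*N + j] * L[i*N + k];
            }
        }
        /* normalize column k, then correct the diagonal entry */
        for (int j = k + 1; j < N; j++) {
            dK[j*N + k] /= L[k*N + k];
            dK[k*N + k] -= L[j*N + k] * dK[j*N + k];
        }
        dK[k*N + k] /= 2. * L[k*N + k];
    }
}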
/* "GPy/util/choleskies_cython.pyx":65
* return dL_dK
*
* def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<<
* cdef double[:,::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_7backprop_gradient_par = {"backprop_gradient_par", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("backprop_gradient_par (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("backprop_gradient_par", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient_par") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_dL.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("backprop_gradient_par", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(__pyx_self, __pyx_v_dL, __pyx_v_L);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
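/* The wrapper above is Cython's standard argument unpacking: it accepts
 * positional or keyword arguments, coerces the two array objects into
 * strided double[:, :] memoryview slices, and forwards them to the
 * implementation function below. The same pattern repeats for every
 * `def` function in this module. */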
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
__Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_N;
int __pyx_v_k;
int __pyx_v_j;
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_7;
long __pyx_t_8;
int __pyx_t_9;
long __pyx_t_10;
long __pyx_t_11;
long __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("backprop_gradient_par", 0);
/* "GPy/util/choleskies_cython.pyx":66
*
* def backprop_gradient_par(double[:,:] dL, double[:,:] L):
* cdef double[:,::1] dL_dK = np.tril(dL) # <<<<<<<<<<<<<<
* cdef int N = L.shape[0]
* cdef int k, j, i
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_dL_dK = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":67
* def backprop_gradient_par(double[:,:] dL, double[:,:] L):
* cdef double[:,::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0] # <<<<<<<<<<<<<<
* cdef int k, j, i
* with nogil:
*/
__pyx_v_N = (__pyx_v_L.shape[0]);
/* "GPy/util/choleskies_cython.pyx":69
* cdef int N = L.shape[0]
* cdef int k, j, i
* with nogil: # <<<<<<<<<<<<<<
* for k in range(N - 1, -1, -1):
* with parallel():
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
/* "GPy/util/choleskies_cython.pyx":70
* cdef int k, j, i
* with nogil:
* for k in range(N - 1, -1, -1): # <<<<<<<<<<<<<<
* with parallel():
* for i in prange(k + 1, N):
*/
for (__pyx_t_7 = (__pyx_v_N - 1); __pyx_t_7 > -1; __pyx_t_7-=1) {
__pyx_v_k = __pyx_t_7;
/* "GPy/util/choleskies_cython.pyx":71
* with nogil:
* for k in range(N - 1, -1, -1):
* with parallel(): # <<<<<<<<<<<<<<
* for i in prange(k + 1, N):
* for j in range(k+1, i+1):
*/
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_12, __pyx_t_9, __pyx_t_15, __pyx_t_17, __pyx_t_10, __pyx_t_19, __pyx_t_16, __pyx_t_18, __pyx_t_24, __pyx_t_26, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_20, __pyx_t_22, __pyx_t_21, __pyx_t_11, __pyx_t_23, __pyx_t_25)
#endif /* _OPENMP */
{
/* "GPy/util/choleskies_cython.pyx":72
* for k in range(N - 1, -1, -1):
* with parallel():
* for i in prange(k + 1, N): # <<<<<<<<<<<<<<
* for j in range(k+1, i+1):
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
*/
__pyx_t_8 = (__pyx_v_k + 1);
__pyx_t_9 = __pyx_v_N;
if (1 == 0) abort();
{
__pyx_t_11 = (__pyx_t_9 - __pyx_t_8) / 1;
if (__pyx_t_11 > 0)
{
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_j) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_11; __pyx_t_10++){
{
__pyx_v_i = __pyx_t_8 + 1 * __pyx_t_10;
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
/* "GPy/util/choleskies_cython.pyx":73
* with parallel():
* for i in prange(k + 1, N):
* for j in range(k+1, i+1): # <<<<<<<<<<<<<<
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
* for j in range(i, N):
*/
__pyx_t_12 = (__pyx_v_i + 1);
for (__pyx_t_13 = (__pyx_v_k + 1); __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
__pyx_v_j = __pyx_t_13;
/* "GPy/util/choleskies_cython.pyx":74
* for i in prange(k + 1, N):
* for j in range(k+1, i+1):
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k] # <<<<<<<<<<<<<<
* for j in range(i, N):
* dL_dK[i, k] -= dL_dK[j, i] * L[j, k]
*/
__pyx_t_14 = __pyx_v_i;
__pyx_t_15 = __pyx_v_j;
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_dL_dK.shape[1];
__pyx_t_16 = __pyx_v_j;
__pyx_t_17 = __pyx_v_k;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[1];
__pyx_t_18 = __pyx_v_i;
__pyx_t_19 = __pyx_v_k;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_18 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_19)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_14 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_16 * __pyx_v_L.strides[0]) ) + __pyx_t_17 * __pyx_v_L.strides[1]) ))));
}
/* "GPy/util/choleskies_cython.pyx":75
* for j in range(k+1, i+1):
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
* for j in range(i, N): # <<<<<<<<<<<<<<
* dL_dK[i, k] -= dL_dK[j, i] * L[j, k]
* for j in range(k + 1, N):
*/
__pyx_t_13 = __pyx_v_N;
for (__pyx_t_20 = __pyx_v_i; __pyx_t_20 < __pyx_t_13; __pyx_t_20+=1) {
__pyx_v_j = __pyx_t_20;
/* "GPy/util/choleskies_cython.pyx":76
* dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
* for j in range(i, N):
* dL_dK[i, k] -= dL_dK[j, i] * L[j, k] # <<<<<<<<<<<<<<
* for j in range(k + 1, N):
* dL_dK[j, k] /= L[k, k]
*/
__pyx_t_21 = __pyx_v_j;
__pyx_t_22 = __pyx_v_i;
if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL_dK.shape[1];
__pyx_t_23 = __pyx_v_j;
__pyx_t_24 = __pyx_v_k;
if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_L.shape[0];
if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_L.shape[1];
__pyx_t_25 = __pyx_v_i;
__pyx_t_26 = __pyx_v_k;
if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_25 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_26)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_21 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_22)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_23 * __pyx_v_L.strides[0]) ) + __pyx_t_24 * __pyx_v_L.strides[1]) ))));
}
}
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
/* "GPy/util/choleskies_cython.pyx":77
* for j in range(i, N):
* dL_dK[i, k] -= dL_dK[j, i] * L[j, k]
* for j in range(k + 1, N): # <<<<<<<<<<<<<<
* dL_dK[j, k] /= L[k, k]
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
*/
__pyx_t_9 = __pyx_v_N;
for (__pyx_t_13 = (__pyx_v_k + 1); __pyx_t_13 < __pyx_t_9; __pyx_t_13+=1) {
__pyx_v_j = __pyx_t_13;
/* "GPy/util/choleskies_cython.pyx":78
* dL_dK[i, k] -= dL_dK[j, i] * L[j, k]
* for j in range(k + 1, N):
* dL_dK[j, k] /= L[k, k] # <<<<<<<<<<<<<<
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
* dL_dK[k, k] /= (2. * L[k, k])
*/
__pyx_t_20 = __pyx_v_k;
__pyx_t_27 = __pyx_v_k;
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_L.shape[0];
if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_L.shape[1];
__pyx_t_28 = __pyx_v_j;
__pyx_t_29 = __pyx_v_k;
if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_28 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_29)) )) /= (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_20 * __pyx_v_L.strides[0]) ) + __pyx_t_27 * __pyx_v_L.strides[1]) )));
/* "GPy/util/choleskies_cython.pyx":79
* for j in range(k + 1, N):
* dL_dK[j, k] /= L[k, k]
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k] # <<<<<<<<<<<<<<
* dL_dK[k, k] /= (2. * L[k, k])
* return dL_dK
*/
__pyx_t_30 = __pyx_v_j;
__pyx_t_31 = __pyx_v_k;
if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_L.shape[0];
if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_L.shape[1];
__pyx_t_32 = __pyx_v_j;
__pyx_t_33 = __pyx_v_k;
if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_dL_dK.shape[1];
__pyx_t_34 = __pyx_v_k;
__pyx_t_35 = __pyx_v_k;
if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_34 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_35)) )) -= ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_30 * __pyx_v_L.strides[0]) ) + __pyx_t_31 * __pyx_v_L.strides[1]) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_32 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_33)) ))));
}
/* "GPy/util/choleskies_cython.pyx":80
* dL_dK[j, k] /= L[k, k]
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
* dL_dK[k, k] /= (2. * L[k, k]) # <<<<<<<<<<<<<<
* return dL_dK
*
*/
__pyx_t_9 = __pyx_v_k;
__pyx_t_13 = __pyx_v_k;
if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[0];
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_L.shape[1];
__pyx_t_36 = __pyx_v_k;
__pyx_t_37 = __pyx_v_k;
if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_v_dL_dK.shape[0];
if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_v_dL_dK.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_36 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_37)) )) /= (2. * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_9 * __pyx_v_L.strides[0]) ) + __pyx_t_13 * __pyx_v_L.strides[1]) ))));
}
}
/* "GPy/util/choleskies_cython.pyx":69
* cdef int N = L.shape[0]
* cdef int k, j, i
* with nogil: # <<<<<<<<<<<<<<
* for k in range(N - 1, -1, -1):
* with parallel():
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/util/choleskies_cython.pyx":81
* dL_dK[k, k] -= L[j, k] * dL_dK[j, k]
* dL_dK[k, k] /= (2. * L[k, k])
* return dL_dK # <<<<<<<<<<<<<<
*
* cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 81; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "GPy/util/choleskies_cython.pyx":65
* return dL_dK
*
* def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<<
* cdef double[:,::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_L, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
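/* backprop_gradient_par regroups the serial double update so that, for a
 * fixed column k, iteration i accumulates only into its own entry
 * dL_dK[i, k]: the unstored upper-triangle value dL_dK[i, j] is read from
 * its lower-triangle mirror dL_dK[j, i] instead. Rows are then mutually
 * independent, so the `prange` maps onto the OpenMP parallel/for pair
 * above, while the column normalization and the diagonal update stay
 * serial after the parallel block.
 *
 * A minimal sketch, assuming OpenMP, of that per-column parallel update
 * on plain row-major arrays; the function name is illustrative and
 * nothing in the generated module calls it. */
static void reference_par_column_update(int N, int k, const double *L, double *dK)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int i = k + 1; i < N; i++) {
        for (int j = k + 1; j <= i; j++)
            dK[i*N + k] -= dK[i*N + j] * L[j*N + k];  /* stored lower part */
        for (int j = i; j < N; j++)
            dK[i*N + k] -= dK[j*N + i] * L[j*N + k];  /* mirrored upper part */
    }
}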
/* "GPy/util/choleskies_cython.pyx":83
* return dL_dK
*
* cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: # <<<<<<<<<<<<<<
* cdef int i, k, n
*
*/
static void __pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(int __pyx_v_N, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
int __pyx_v_i;
int __pyx_v_k;
int __pyx_v_n;
double __pyx_v_alpha;
double __pyx_v_beta;
int __pyx_v_incx;
double __pyx_v_scale;
long __pyx_t_1;
long __pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
int __pyx_t_5;
long __pyx_t_6;
long __pyx_t_7;
long __pyx_t_8;
int __pyx_t_9;
long __pyx_t_10;
int __pyx_t_11;
long __pyx_t_12;
int __pyx_t_13;
long __pyx_t_14;
long __pyx_t_15;
long __pyx_t_16;
int __pyx_t_17;
long __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
double __pyx_t_21;
int __pyx_t_22;
long __pyx_t_23;
int __pyx_t_24;
long __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "GPy/util/choleskies_cython.pyx":87
*
* # DSYMV required constant arguments
* cdef double alpha=-1, beta=1 # <<<<<<<<<<<<<<
* cdef int incx=N
*
*/
__pyx_v_alpha = -1.0;
__pyx_v_beta = 1.0;
/* "GPy/util/choleskies_cython.pyx":88
* # DSYMV required constant arguments
* cdef double alpha=-1, beta=1
* cdef int incx=N # <<<<<<<<<<<<<<
*
* # DSCAL required arguments
*/
__pyx_v_incx = __pyx_v_N;
/* "GPy/util/choleskies_cython.pyx":93
* cdef double scale
*
* dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) # <<<<<<<<<<<<<<
* for k in range(N-2, -1, -1):
* n = N-k-1
*/
__pyx_t_1 = (__pyx_v_N - 1);
__pyx_t_2 = (__pyx_v_N - 1);
if (__pyx_t_1 < 0) __pyx_t_1 += __pyx_v_L.shape[0];
if (__pyx_t_2 < 0) __pyx_t_2 += __pyx_v_L.shape[1];
__pyx_t_3 = (__pyx_v_N - 1);
__pyx_t_4 = (__pyx_v_N - 1);
if (__pyx_t_3 < 0) __pyx_t_3 += __pyx_v_dL.shape[0];
if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_3 * __pyx_v_dL.strides[0]) )) + __pyx_t_4)) )) /= (2. * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_1 * __pyx_v_L.strides[0]) )) + __pyx_t_2)) ))));
/* "GPy/util/choleskies_cython.pyx":94
*
* dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1])
* for k in range(N-2, -1, -1): # <<<<<<<<<<<<<<
* n = N-k-1
* cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx,
*/
for (__pyx_t_5 = (__pyx_v_N - 2); __pyx_t_5 > -1; __pyx_t_5-=1) {
__pyx_v_k = __pyx_t_5;
/* "GPy/util/choleskies_cython.pyx":95
* dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1])
* for k in range(N-2, -1, -1):
* n = N-k-1 # <<<<<<<<<<<<<<
* cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx,
* beta=&beta, y=&dL[k + 1, k], incy=&N)
*/
__pyx_v_n = ((__pyx_v_N - __pyx_v_k) - 1);
/* "GPy/util/choleskies_cython.pyx":96
* for k in range(N-2, -1, -1):
* n = N-k-1
* cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, # <<<<<<<<<<<<<<
* beta=&beta, y=&dL[k + 1, k], incy=&N)
*
*/
__pyx_t_6 = (__pyx_v_k + 1);
__pyx_t_7 = (__pyx_v_k + 1);
if (__pyx_t_6 < 0) __pyx_t_6 += __pyx_v_dL.shape[0];
if (__pyx_t_7 < 0) __pyx_t_7 += __pyx_v_dL.shape[1];
__pyx_t_8 = (__pyx_v_k + 1);
__pyx_t_9 = __pyx_v_k;
if (__pyx_t_8 < 0) __pyx_t_8 += __pyx_v_L.shape[0];
if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[1];
/* "GPy/util/choleskies_cython.pyx":97
* n = N-k-1
* cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx,
* beta=&beta, y=&dL[k + 1, k], incy=&N) # <<<<<<<<<<<<<<
*
* for i in xrange(0, N - k - 1):
*/
__pyx_t_10 = (__pyx_v_k + 1);
__pyx_t_11 = __pyx_v_k;
if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_v_dL.shape[0];
if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_v_dL.shape[1];
/* "GPy/util/choleskies_cython.pyx":96
* for k in range(N-2, -1, -1):
* n = N-k-1
* cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, # <<<<<<<<<<<<<<
* beta=&beta, y=&dL[k + 1, k], incy=&N)
*
*/
__pyx_f_5scipy_6linalg_11cython_blas_dsymv(__pyx_k_u, (&__pyx_v_n), (&__pyx_v_alpha), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_6 * __pyx_v_dL.strides[0]) )) + __pyx_t_7)) )))), (&__pyx_v_N), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_8 * __pyx_v_L.strides[0]) )) + __pyx_t_9)) )))), (&__pyx_v_incx), (&__pyx_v_beta), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_10 * __pyx_v_dL.strides[0]) )) + __pyx_t_11)) )))), (&__pyx_v_N));
/* "GPy/util/choleskies_cython.pyx":99
* beta=&beta, y=&dL[k + 1, k], incy=&N)
*
* for i in xrange(0, N - k - 1): # <<<<<<<<<<<<<<
 * dL[k + 1 + i, k] -= dL[k + i + 1, k + i + 1] * L[k + 1 + i, k]
*
*/
__pyx_t_12 = ((__pyx_v_N - __pyx_v_k) - 1);
for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
__pyx_v_i = __pyx_t_13;
/* "GPy/util/choleskies_cython.pyx":100
*
* for i in xrange(0, N - k - 1):
 * dL[k + 1 + i, k] -= dL[k + i + 1, k + i + 1] * L[k + 1 + i, k] # <<<<<<<<<<<<<<
*
* scale = 1.0 / L[k, k]
*/
__pyx_t_14 = ((__pyx_v_k + __pyx_v_i) + 1);
__pyx_t_15 = ((__pyx_v_k + __pyx_v_i) + 1);
if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_dL.shape[0];
if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_dL.shape[1];
__pyx_t_16 = ((__pyx_v_k + 1) + __pyx_v_i);
__pyx_t_17 = __pyx_v_k;
if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[0];
if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[1];
__pyx_t_18 = ((__pyx_v_k + 1) + __pyx_v_i);
__pyx_t_19 = __pyx_v_k;
if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL.shape[0];
if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_18 * __pyx_v_dL.strides[0]) )) + __pyx_t_19)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_14 * __pyx_v_dL.strides[0]) )) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_16 * __pyx_v_L.strides[0]) )) + __pyx_t_17)) ))));
}
/* "GPy/util/choleskies_cython.pyx":102
 * dL[k + 1 + i, k] -= dL[k + i + 1, k + i + 1] * L[k + 1 + i, k]
*
* scale = 1.0 / L[k, k] # <<<<<<<<<<<<<<
* cblas.dscal(&n, &scale , &dL[k + 1, k], &N)
* #
*/
__pyx_t_13 = __pyx_v_k;
__pyx_t_20 = __pyx_v_k;
if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_L.shape[0];
if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_L.shape[1];
__pyx_t_21 = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_13 * __pyx_v_L.strides[0]) )) + __pyx_t_20)) )));
if (unlikely(__pyx_t_21 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 102; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_scale = (1.0 / __pyx_t_21);
/* "GPy/util/choleskies_cython.pyx":103
*
* scale = 1.0 / L[k, k]
* cblas.dscal(&n, &scale , &dL[k + 1, k], &N) # <<<<<<<<<<<<<<
* #
* dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx)
*/
__pyx_t_12 = (__pyx_v_k + 1);
__pyx_t_22 = __pyx_v_k;
if (__pyx_t_12 < 0) __pyx_t_12 += __pyx_v_dL.shape[0];
if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL.shape[1];
__pyx_f_5scipy_6linalg_11cython_blas_dscal((&__pyx_v_n), (&__pyx_v_scale), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_12 * __pyx_v_dL.strides[0]) )) + __pyx_t_22)) )))), (&__pyx_v_N));
/* "GPy/util/choleskies_cython.pyx":105
* cblas.dscal(&n, &scale , &dL[k + 1, k], &N)
* #
* dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) # <<<<<<<<<<<<<<
* dL[k, k] /= (2.0 * L[k, k])
*
*/
__pyx_t_23 = (__pyx_v_k + 1);
__pyx_t_24 = __pyx_v_k;
if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_dL.shape[0];
if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_dL.shape[1];
__pyx_t_25 = (__pyx_v_k + 1);
__pyx_t_26 = __pyx_v_k;
if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_L.shape[0];
if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_L.shape[1];
__pyx_t_27 = __pyx_v_k;
__pyx_t_28 = __pyx_v_k;
if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_dL.shape[0];
if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_27 * __pyx_v_dL.strides[0]) )) + __pyx_t_28)) )) -= __pyx_f_5scipy_6linalg_11cython_blas_ddot((&__pyx_v_n), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_23 * __pyx_v_dL.strides[0]) )) + __pyx_t_24)) )))), (&__pyx_v_N), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_25 * __pyx_v_L.strides[0]) )) + __pyx_t_26)) )))), (&__pyx_v_incx));
/* "GPy/util/choleskies_cython.pyx":106
* #
* dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx)
* dL[k, k] /= (2.0 * L[k, k]) # <<<<<<<<<<<<<<
*
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L):
*/
__pyx_t_29 = __pyx_v_k;
__pyx_t_30 = __pyx_v_k;
if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_L.shape[0];
if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_L.shape[1];
__pyx_t_31 = __pyx_v_k;
__pyx_t_32 = __pyx_v_k;
if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL.shape[0];
if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL.shape[1];
*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_31 * __pyx_v_dL.strides[0]) )) + __pyx_t_32)) )) /= (2.0 * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_29 * __pyx_v_L.strides[0]) )) + __pyx_t_30)) ))));
}
/* "GPy/util/choleskies_cython.pyx":83
* return dL_dK
*
* cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: # <<<<<<<<<<<<<<
* cdef int i, k, n
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_WriteUnraisable("GPy.util.choleskies_cython.chol_backprop", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
}
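/* chol_backprop runs the same column recurrence as the functions above,
 * but the O(n^2) inner update for each column k collapses into one
 * symmetric matrix-vector product (DSYMV) on the trailing
 * (N-k-1) x (N-k-1) block, followed by DSCAL and DDOT. A few details:
 *   - incx = N: the arrays are C-contiguous, so a matrix column is a
 *     vector with increment N, and lda stays N for the embedded block.
 *   - uplo = 'u': scipy's cython_blas follows Fortran (column-major)
 *     conventions, so 'upper' on this row-major buffer addresses the
 *     lower triangle of the C array, which is where tril(dL) lives.
 *   - the small i-loop after DSYMV subtracts the diagonal term a second
 *     time: the scalar recurrence counts dL[i, i] * L[i, k] twice,
 *     whereas SYMV counts the diagonal of the block only once.
 *
 * A minimal CBLAS restatement on plain row-major arrays, guarded by a
 * hypothetical macro because this module reaches BLAS through scipy
 * rather than a C cblas header; names are illustrative. */
#ifdef CHOLESKIES_REFERENCE_SKETCH
#include <cblas.h>
static void reference_chol_backprop_blas(int N, const double *L, double *dK)
{
    dK[(N-1)*N + (N-1)] /= 2. * L[(N-1)*N + (N-1)];
    for (int k = N - 2; k >= 0; k--) {
        int n = N - k - 1;
        /* dK[k+1:, k] -= sym(dK[k+1:, k+1:]) * L[k+1:, k] */
        cblas_dsymv(CblasRowMajor, CblasLower, n, -1.0,
                    &dK[(k+1)*N + (k+1)], N, &L[(k+1)*N + k], N,
                    1.0, &dK[(k+1)*N + k], N);
        for (int i = 0; i < n; i++)  /* second copy of the diagonal term */
            dK[(k+1+i)*N + k] -= dK[(k+1+i)*N + (k+1+i)] * L[(k+1+i)*N + k];
        cblas_dscal(n, 1.0 / L[k*N + k], &dK[(k+1)*N + k], N);
        dK[k*N + k] -= cblas_ddot(n, &dK[(k+1)*N + k], N, &L[(k+1)*N + k], N);
        dK[k*N + k] /= 2. * L[k*N + k];
    }
}
#endif /* CHOLESKIES_REFERENCE_SKETCH */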
/* "GPy/util/choleskies_cython.pyx":108
* dL[k, k] /= (2.0 * L[k, k])
*
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
*/
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c = {"backprop_gradient_par_c", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("backprop_gradient_par_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("backprop_gradient_par_c", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient_par_c") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_dL.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_L.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("backprop_gradient_par_c", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(__pyx_self, __pyx_v_dL, __pyx_v_L);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
__Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_L_cont = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_N;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("backprop_gradient_par_c", 0);
/* "GPy/util/choleskies_cython.pyx":109
*
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L):
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig # <<<<<<<<<<<<<<
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
* cdef int N = L.shape[0]
*/
__pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_4) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_dL_dK = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":110
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L):
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L) # <<<<<<<<<<<<<<
* cdef int N = L.shape[0]
* with nogil:
*/
__pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_L, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = NULL;
PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1);
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_L_cont = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "GPy/util/choleskies_cython.pyx":111
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
* cdef int N = L.shape[0] # <<<<<<<<<<<<<<
* with nogil:
* chol_backprop(N, dL_dK, L_cont)
*/
__pyx_v_N = (__pyx_v_L.shape[0]);
/* "GPy/util/choleskies_cython.pyx":112
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
* cdef int N = L.shape[0]
* with nogil: # <<<<<<<<<<<<<<
* chol_backprop(N, dL_dK, L_cont)
* return np.asarray(dL_dK)
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
/* "GPy/util/choleskies_cython.pyx":113
* cdef int N = L.shape[0]
* with nogil:
* chol_backprop(N, dL_dK, L_cont) # <<<<<<<<<<<<<<
* return np.asarray(dL_dK)
*/
__pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(__pyx_v_N, __pyx_v_dL_dK, __pyx_v_L_cont);
}
/* "GPy/util/choleskies_cython.pyx":112
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
* cdef int N = L.shape[0]
* with nogil: # <<<<<<<<<<<<<<
* chol_backprop(N, dL_dK, L_cont)
* return np.asarray(dL_dK)
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "GPy/util/choleskies_cython.pyx":114
* with nogil:
* chol_backprop(N, dL_dK, L_cont)
* return np.asarray(dL_dK) # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_asarray); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_3)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_3) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = NULL;
PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__pyx_t_5 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "GPy/util/choleskies_cython.pyx":108
* dL[k, k] /= (2.0 * L[k, k])
*
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_L_cont, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_L, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
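/* backprop_gradient_par_c is the user-facing entry point for the BLAS
 * variant: np.tril(dL) both zeroes the upper triangle and yields a fresh
 * C-contiguous, writable buffer, and np.ascontiguousarray(L) guarantees
 * the row stride of N doubles that chol_backprop's increment arguments
 * assume. Typical usage from Python (illustrative):
 *
 *     from GPy.util import choleskies_cython
 *     dK = choleskies_cython.backprop_gradient_par_c(dL, L)
 */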
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
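/* Everything from here on is boilerplate expanded from Cython's bundled
 * numpy/__init__.pxd rather than from choleskies_cython.pyx: the
 * __getbuffer__ shim exports ndarray data through the PEP 3118 buffer
 * protocol so that Cython's buffer and memoryview machinery can wrap
 * numpy arrays such as the dL and L arguments above. */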
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
goto __pyx_L4;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
* # This is allocated as one block, strides first.
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
goto __pyx_L11;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
* else:
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237
* info.strides = <Py_ssize_t*>PyArray_STRIDES(self)
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238
* info.shape = <Py_ssize_t*>PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef list stack
*/
__pyx_v_f = NULL;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef list stack
* cdef int offset
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":247
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":249
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
goto __pyx_L14;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
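/* info.obj doubles as the release sentinel here: Py_None records that no
   __releasebuffer__ call is needed, while storing self records that
   __releasebuffer__ must run to free any heap-allocated buffers. */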
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
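/* The switch below maps NumPy type numbers (NPY_BYTE, NPY_UBYTE, ...) to
   their struct-module / PEP 3118 format characters ("b", "B", "h", ...,
   with a "Z" prefix for the complex types), producing the format string a
   buffer-protocol consumer expects. */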
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
switch (__pyx_v_t) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
case NPY_BYTE:
__pyx_v_f = __pyx_k_b;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = __pyx_k_B;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = __pyx_k_h;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = __pyx_k_H;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = __pyx_k_i;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = __pyx_k_I;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = __pyx_k_l;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = __pyx_k_L;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = __pyx_k_q;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = __pyx_k_Q;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = __pyx_k_f;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = __pyx_k_d;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = __pyx_k_g;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = __pyx_k_Zf;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = __pyx_k_Zd;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = __pyx_k_Zg;
break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = __pyx_k_O;
break;
default:
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 279; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
break;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
* return
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
__pyx_v_info->format = ((char *)malloc(255));
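/* (255 here, and in the end-pointer bound passed to _util_dtypestring
   below, is presumably the inlined value of the .pxd constant
   _buffer_format_string_len named in the quoted source.) */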
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284
* else:
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285
* info.format = <char*>stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_7;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
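/* Exit-path bookkeeping above: on error, any reference already stored in
   info->obj is dropped and the slot cleared so the caller does not try to
   release a half-initialized buffer; on success, a slot still holding the
   Py_None sentinel is decref'd and cleared so PyBuffer_Release will not
   touch it. */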
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides)
*/
free(__pyx_v_info->format);
goto __pyx_L3;
}
__pyx_L3:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":295
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
free(__pyx_v_info->strides);
goto __pyx_L4;
}
__pyx_L4:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
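/* __releasebuffer__ mirrors __getbuffer__: the malloc'd format string is
   freed only for structured dtypes (PyArray_HASFIELDS), and the strides
   block is freed only when the sizeof mismatch forced a copy -- info.shape
   shares that block, so a single free suffices. A hypothetical Cython
   snippet that would exercise this pair (illustrative only, not part of
   this generated module):

       cimport numpy as np
       import numpy as np

       def head(np.ndarray[np.float64_t, ndim=1] x):
           # the typed buffer access calls ndarray.__getbuffer__ on entry
           # and __releasebuffer__ when the buffer view is released
           return x[0]
*/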
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
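/* PyArray_MultiIterNew2 through PyArray_MultiIterNew5 below are generated
   from the same template as PyArray_MultiIterNew1 above, differing only in
   arity; each is a thin inline wrapper over numpy's PyArray_MultiIterNew
   broadcasting-iterator constructor. */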
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 784; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":793
* cdef int delta_offset
* cdef tuple i
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple i
* cdef int endian_detector = 1
* cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
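/* Endianness probe: the int 1 is stored and its lowest-addressed byte read
   back; a nonzero first byte means the host is little-endian. */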
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
if (unlikely(__pyx_v_descr->fields == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
* child, new_offset = fields
*
* if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 801; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
if (__pyx_t_6) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 816; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 120;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
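/* The loop above emits 'x' (ASCII 120) pad bytes so that the next field's
   format characters begin at that field's declared offset within the
   structured dtype, as the struct-module format syntax requires. */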
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 824; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 104;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 105;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 108;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 113;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 102;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 100;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 103;
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 102;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":843
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 843; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 100;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 103;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 845; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 847; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L15:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L13;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 852; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":797
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
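/* NOTE (hand-written sketch, not Cython output): _util_dtypestring above
 * walks a structured numpy dtype and appends single-character buffer-format
 * codes at *f. Complex types are written as 'Z' (90) followed by the base
 * float character ('f' = 102, 'd' = 100, 'g' = 103); object dtypes as
 * 'O' (79). A minimal restatement of that encoding rule, advancing the
 * pointer inline instead of via the shared "f += 1" used above:
 *
 *     static char *emit_format_char(char *f, int t)
 *     {
 *         switch (t) {
 *             case NPY_CFLOAT:      *f++ = 'Z'; *f++ = 'f'; break;
 *             case NPY_CDOUBLE:     *f++ = 'Z'; *f++ = 'd'; break;
 *             case NPY_CLONGDOUBLE: *f++ = 'Z'; *f++ = 'g'; break;
 *             case NPY_OBJECT:      *f++ = 'O'; break;
 *             default: return NULL;   // unknown dtype code; caller raises
 *         }
 *         return f;
 *     }
 */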
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
goto __pyx_L3;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
*/
Py_INCREF(__pyx_v_base);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":975
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
* Py_INCREF(base) # important to do this before decref below!
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
* baseptr = <PyObject*>base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
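/* NOTE (hand-written sketch, not Cython output): the Py_INCREF(base)
 * before Py_XDECREF(arr.base) in set_array_base above is the classic
 * "incref new, then decref old" ordering -- if base and the current
 * arr.base are the same object, decrefing first could drop the last
 * reference and free the object before it is stored back. The generic
 * idiom, with a hypothetical helper name:
 *
 *     static void replace_owned(PyObject **slot, PyObject *newval)
 *     {
 *         PyObject *old = *slot;
 *         Py_XINCREF(newval);    // take the new reference first
 *         *slot = newval;
 *         Py_XDECREF(old);       // then release the old one
 *     }
 */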
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":981
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL:
* return None # <<<<<<<<<<<<<<
* else:
* return <object>arr.base
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
}
/*else*/ {
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":983
* return None
* else:
* return <object>arr.base # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_arr->base));
__pyx_r = ((PyObject *)__pyx_v_arr->base);
goto __pyx_L0;
}
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":979
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
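/* NOTE: get_array_base is the read-side counterpart of set_array_base --
 * arr->base is held as a raw PyObject* whose reference is owned by the
 * array itself, so handing it out as a Python object requires the INCREF
 * seen above, and a NULL base maps to Python None. */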
/* "View.MemoryView":116
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "View.MemoryView":117
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":116
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
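/* NOTE: the argument-unpacking wrapper above relies on deliberate switch
 * fall-through -- "case 5:" down through "case 1:" have no break, so a
 * call with N positional arguments fills values[N-1] down to values[0] in
 * a single pass before keyword arguments are merged in. The same pattern
 * recurs in every generated wrapper in this file. */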
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":123
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":124
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":126
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":127
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":129
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":130
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if isinstance(format, unicode):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":132
* raise ValueError("itemsize <= 0 for cython.array")
*
* if isinstance(format, unicode): # <<<<<<<<<<<<<<
* format = (<unicode>format).encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyUnicode_Check(__pyx_v_format);
__pyx_t_4 = (__pyx_t_2 != 0);
if (__pyx_t_4) {
/* "View.MemoryView":133
*
* if isinstance(format, unicode):
* format = (<unicode>format).encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
if (unlikely(__pyx_v_format == Py_None)) {
PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", "encode");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_3 = PyUnicode_AsASCIIString(((PyObject*)__pyx_v_format)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L5;
}
__pyx_L5:;
/* "View.MemoryView":134
* if isinstance(format, unicode):
* format = (<unicode>format).encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":135
* format = (<unicode>format).encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->format = __pyx_t_5;
/* "View.MemoryView":138
*
*
* self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyMem_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":139
*
* self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":141
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":142
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":145
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_6 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_7); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_7); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_v_dim = __pyx_t_8;
__pyx_v_idx = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":146
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":147
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_7 = 0;
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_10);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_Raise(__pyx_t_9, 0, 0, 0);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":148
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":145
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":151
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":152
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":153
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
goto __pyx_L10;
}
/* "View.MemoryView":154
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":155
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":156
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
goto __pyx_L10;
}
/*else*/ {
/* "View.MemoryView":158
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L10:;
/* "View.MemoryView":160
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":163
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":164
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_3 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":165
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":168
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":169
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":170
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":172
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":173
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":174
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "View.MemoryView":175
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":176
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
goto __pyx_L13;
}
__pyx_L13:;
goto __pyx_L11;
}
__pyx_L11:;
/* "View.MemoryView":116
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
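/* NOTE (hand-written sketch, not Cython output): two details of __cinit__
 * above worth restating. First, shape and strides share one allocation:
 * PyMem_Malloc(sizeof(Py_ssize_t) * ndim * 2) with _strides = _shape +
 * ndim, so a single PyMem_Free in __dealloc__ releases both halves.
 * Second, a freshly malloc'd buffer of object pointers is seeded with
 * owned references to Py_None, so later per-slot DECREFs are always safe.
 * Sketch of the seeding step (seed_object_buffer is a hypothetical name):
 *
 *     static void seed_object_buffer(char *data, Py_ssize_t len,
 *                                    Py_ssize_t itemsize)
 *     {
 *         PyObject **p = (PyObject **)data;
 *         Py_ssize_t i, n = len / itemsize;  // itemsize checked != 0 above
 *         for (i = 0; i < n; i++) {
 *             p[i] = Py_None;
 *             Py_INCREF(Py_None);            // each slot owns a reference
 *         }
 *     }
 */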
/* "View.MemoryView":179
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":180
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":181
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":182
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
/* "View.MemoryView":183
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":184
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":185
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":186
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":187
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":188
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":189
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":190
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":191
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":192
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":193
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":194
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":196
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":197
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":199
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":201
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":179
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
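/* NOTE (hand-written sketch, not Cython output): __getbuffer__ above
 * first checks that the caller's request flags are compatible with the
 * array's layout, then fills the Py_buffer fields and stores an owned
 * reference to self in info->obj so the array outlives every exported
 * view. A minimal sketch of the flag check (check_contig is a
 * hypothetical name):
 *
 *     static int check_contig(int flags, int is_c_mode)
 *     {
 *         int bufmode = (is_c_mode ? PyBUF_C_CONTIGUOUS
 *                                  : PyBUF_F_CONTIGUOUS)
 *                       | PyBUF_ANY_CONTIGUOUS;
 *         return (flags & bufmode) != 0;  // 0 -> caller raises ValueError
 *     }
 */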
/* "View.MemoryView":205
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":206
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":207
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
goto __pyx_L3;
}
/* "View.MemoryView":208
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":209
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":210
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":212
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyMem_Free(self._shape)
*
*/
free(__pyx_v_self->data);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":213
* self._strides, self.ndim, False)
* free(self.data)
* PyMem_Free(self._shape) # <<<<<<<<<<<<<<
*
* property memview:
*/
PyMem_Free(__pyx_v_self->_shape);
/* "View.MemoryView":205
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
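/* NOTE: teardown order in __dealloc__ above -- a registered
 * callback_free_data takes precedence; otherwise, when the array owns its
 * buffer (free_data), the object slots are DECREF'd via
 * refcount_objects_in_slice before free(self.data), and the combined
 * shape/strides block is released with PyMem_Free to match the
 * PyMem_Malloc in __cinit__. Pairing malloc/free and
 * PyMem_Malloc/PyMem_Free correctly per pointer is essential here. */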
/* "View.MemoryView":217
* property memview:
* @cname('get_memview')
* def __get__(self): # <<<<<<<<<<<<<<
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
*/
/* Python wrapper */
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":219
* def __get__(self):
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":220
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":217
* property memview:
* @cname('get_memview')
* def __get__(self): # <<<<<<<<<<<<<<
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
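/* NOTE: the memview property above constructs a fresh
 * memoryview(self, flags, dtype_is_object) on every access, with
 * flags = PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE -- i.e. a
 * writable, format-carrying view over the array's buffer. */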
/* "View.MemoryView":223
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":224
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":223
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":227
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":229
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":230
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":229
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
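/* NOTE: __getattr__, __getitem__ and __setitem__ above are thin
 * delegators -- each fetches the memview property (creating a new
 * memoryview, see the note above) and forwards the operation to it, so
 * indexing or assigning into an array is equivalent to doing so through a
 * temporary memoryview of it. */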
/* "View.MemoryView":234
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":238
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":239
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":241
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":242
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":241
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":243
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":245
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":234
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
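/*
 * Editor's note on array_cwrapper above: the positional argument tuple for
 * the `array` type is built by hand. PyTuple_SET_ITEM steals a reference,
 * which is why every item is either freshly created or INCREF'd first and
 * then GIVEREF'd into the tuple. A minimal sketch of that protocol follows;
 * demo_call_pair is a hypothetical helper, not part of the generated code.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static PyObject *demo_call_pair(PyObject *callable, PyObject *a, PyObject *b)
{
    PyObject *args = PyTuple_New(2);
    if (args == NULL) return NULL;
    Py_INCREF(a); PyTuple_SET_ITEM(args, 0, a);  /* tuple steals this ref */
    Py_INCREF(b); PyTuple_SET_ITEM(args, 1, b);
    PyObject *result = PyObject_Call(callable, args, NULL);
    Py_DECREF(args);
    return result;                               /* NULL on error */
}
#endif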
/* "View.MemoryView":271
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":272
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":271
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
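/*
 * Editor's note: the sequence above (INCREF new, GIVEREF new, GOTREF old,
 * DECREF old, then store) is the safe order for replacing an owned
 * PyObject* field: the new value is retained before the old one is
 * released, so a destructor fired by the DECREF can never observe or free
 * the incoming object. A sketch without the refnanny bookkeeping;
 * demo_set_field is hypothetical, not generated code.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static void demo_set_field(PyObject **slot, PyObject *value)
{
    PyObject *old = *slot;
    Py_INCREF(value);    /* retain the new value first */
    *slot = value;
    Py_XDECREF(old);     /* releasing the old value last keeps `value` safe */
}
#endif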
/* "View.MemoryView":273
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":274
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":273
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":288
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":290
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":294
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":296
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":297
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":299
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview')
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":288
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
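/*
 * Editor's note: align_pointer rounds an address up to the next multiple of
 * `alignment`. Worked example: memory = 0x1003, alignment = 8 gives
 *     offset    = 0x1003 % 8 = 3
 *     aligned_p = 0x1003 + (8 - 3) = 0x1008   (the next 8-byte boundary)
 * An address already on a boundary (offset == 0) is returned unchanged.
 */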
/* "View.MemoryView":317
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":318
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":319
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":320
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)((PyObject *)__pyx_memoryview_type)));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":321
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":322
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":323
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":324
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* self.lock = PyThread_allocate_lock()
*/
Py_INCREF(Py_None);
goto __pyx_L6;
}
__pyx_L6:;
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":326
* Py_INCREF(Py_None)
*
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock == NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":327
*
* self.lock = PyThread_allocate_lock()
* if self.lock == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":328
* self.lock = PyThread_allocate_lock()
* if self.lock == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":330
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = self.view.format == b'O'
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":331
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = self.view.format == b'O' # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_1;
goto __pyx_L8;
}
/*else*/ {
/* "View.MemoryView":333
* self.dtype_is_object = self.view.format == b'O'
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L8:;
/* "View.MemoryView":335
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":337
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":317
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
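/*
 * Editor's note: __cinit__ acquires the underlying Py_buffer through the
 * buffer protocol and, when PyBUF_FORMAT was requested, detects an object
 * dtype by comparing view.format against b'O'. A minimal sketch of that
 * acquisition outside the generated wrapper; demo_probe_buffer is a
 * hypothetical helper, and PyBUF_FULL_RO is just one possible flag choice.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
#include <string.h>
static int demo_probe_buffer(PyObject *obj)
{
    Py_buffer view;
    int dtype_is_object;
    if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) < 0)
        return -1;                              /* exception already set */
    dtype_is_object = (view.format != NULL && strcmp(view.format, "O") == 0);
    PyBuffer_Release(&view);                    /* pair every acquire */
    return dtype_is_object;
}
#endif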
/* "View.MemoryView":339
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":340
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
*
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":341
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
*
* if self.lock != NULL:
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":343
* __Pyx_ReleaseBuffer(&self.view)
*
* if self.lock != NULL: # <<<<<<<<<<<<<<
* PyThread_free_lock(self.lock)
*
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":344
*
* if self.lock != NULL:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":339
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
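/*
 * Editor's note: teardown mirrors __cinit__ in reverse and only releases
 * what was actually acquired -- the Py_buffer when self.obj is not None,
 * and the thread lock when PyThread_allocate_lock() succeeded. Sketch
 * below; demo_teardown is hypothetical.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static void demo_teardown(PyObject *owner, Py_buffer *view,
                          PyThread_type_lock lock)
{
    if (owner != Py_None)
        PyBuffer_Release(view);   /* undo __Pyx_GetBuffer */
    if (lock != NULL)
        PyThread_free_lock(lock); /* undo PyThread_allocate_lock */
}
#endif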
/* "View.MemoryView":346
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":348
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":350
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "(0 < 0)" is a constant-folded error check; the branch is never taken */
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "(0 < 0)" is a constant-folded error check; the branch is never taken */
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":351
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":350
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":353
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":346
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
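/*
 * Editor's note: get_item_pointer walks the index sequence and lets
 * pybuffer_index advance the raw pointer one dimension at a time. The core
 * strided step looks roughly like the sketch below; bounds checking and the
 * suboffset indirection that the real helper also handles are omitted, and
 * demo_index_step is hypothetical.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static char *demo_index_step(const Py_buffer *view, char *itemp,
                             Py_ssize_t idx, Py_ssize_t dim)
{
    if (idx < 0)
        idx += view->shape[dim];            /* wrap negative indices */
    return itemp + idx * view->strides[dim];
}
#endif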
/* "View.MemoryView":356
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":357
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":358
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
}
/* "View.MemoryView":360
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":363
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_2) {
/* "View.MemoryView":364
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":366
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":367
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":356
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
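/*
 * Editor's note: __getitem__ dispatches three ways -- Ellipsis returns self,
 * an index containing slices returns a new sliced view, and a full integer
 * index resolves to a single element. The size-checked unpack of the
 * (have_slices, indices) pair above follows the usual 2-tuple protocol; a
 * simplified sketch assuming a real tuple (the generated code also
 * tolerates other sequences). demo_unpack_pair is hypothetical.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static int demo_unpack_pair(PyObject *pair, PyObject **a, PyObject **b)
{
    if (pair == Py_None || !PyTuple_Check(pair) || PyTuple_GET_SIZE(pair) != 2) {
        PyErr_SetString(PyExc_ValueError, "expected a 2-tuple");
        return -1;
    }
    *a = PyTuple_GET_ITEM(pair, 0); Py_INCREF(*a);
    *b = PyTuple_GET_ITEM(pair, 1); Py_INCREF(*b);
    return 0;
}
#endif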
/* "View.MemoryView":369
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":370
*
* def __setitem__(memoryview self, object index, object value):
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (likely(__pyx_t_1 != Py_None)) {
PyObject* sequence = __pyx_t_1;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":372
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":373
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_obj = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":374
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":375
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":377
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
__pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L4:;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":379
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L3:;
/* "View.MemoryView":369
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
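/*
 * Editor's note: __setitem__ selects one of three assignment paths:
 *   integer index             -> setitem_indexed             (single element)
 *   slice index, slice value  -> setitem_slice_assignment    (copy contents)
 *   slice index, scalar value -> setitem_slice_assign_scalar (broadcast)
 * is_slice(value) below decides between the last two: it coerces value to a
 * memoryview when possible and yields None when that coercion raises
 * TypeError.
 */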
/* "View.MemoryView":381
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":382
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, ((PyObject *)__pyx_memoryview_type));
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":384
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":385
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":384
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":386
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":387
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L11_try_end:;
}
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":389
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":381
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
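/*
 * Editor's note: the ExceptionSave / GetException / ExceptionReset machinery
 * above is Cython's rendering of `try: ... except TypeError: return None`.
 * The bare C API shape of the same idea is sketched below; demo_coerce is
 * hypothetical, not generated code.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static PyObject *demo_coerce(PyTypeObject *type, PyObject *arg)
{
    PyObject *obj = PyObject_CallFunction((PyObject *) type, "O", arg);
    if (obj == NULL) {
        if (PyErr_ExceptionMatches(PyExc_TypeError)) {
            PyErr_Clear();          /* swallow only TypeError */
            Py_RETURN_NONE;         /* mirror `return None` */
        }
        return NULL;                /* propagate everything else */
    }
    return obj;
}
#endif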
/* "View.MemoryView":391
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":395
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":396
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":397
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":395
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":391
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
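/*
 * Editor's note: both operands are lowered to raw __Pyx_memviewslice structs
 * before memoryview_copy_contents copies src into dst; the call passes both
 * ndims plus the dtype_is_object flag, presumably so the copy can maintain
 * reference counts when the elements are PyObject pointers.
 */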
/* "View.MemoryView":399
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[128];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
char const *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":401
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":406
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":408
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":409
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":410
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":411
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":412
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":414
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":416
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":417
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":418
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
goto __pyx_L8;
}
/*else*/ {
/* "View.MemoryView":420
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L8:;
/* "View.MemoryView":424
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":425
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
goto __pyx_L9;
}
__pyx_L9:;
/* "View.MemoryView":426
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":429
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
/*exception exit:*/{
__pyx_L6_error:;
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_6);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11);
}
__Pyx_XGIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8);
__pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0;
__pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":399
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
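/*
 * Editor's note: the scalar is first packed into scratch storage -- a
 * 128-int stack array (512 bytes with 4-byte ints) when the itemsize fits,
 * a PyMem_Malloc block otherwise -- and the finally-block's PyMem_Free(tmp)
 * is a no-op for the stack case because tmp stays NULL. Sketch of that
 * pattern below; demo_with_scratch is hypothetical.
 */
#if 0
/* hypothetical sketch -- not part of the generated module */
static int demo_with_scratch(size_t itemsize)
{
    int array[128];
    void *tmp = NULL, *item;
    if (itemsize > sizeof(array)) {
        tmp = PyMem_Malloc(itemsize);
        if (tmp == NULL) { PyErr_NoMemory(); return -1; }
        item = tmp;
    } else {
        item = (void *) array;      /* small items use the stack */
    }
    /* ... fill *item and broadcast it over the destination slice ... */
    PyMem_Free(tmp);                /* NULL-safe: frees only the heap case */
    return 0;
}
#endif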
/* "View.MemoryView":431
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":432
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 432; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":433
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 433; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":431
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
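/*
 * Editor's note: the casts through __pyx_vtabstruct_memoryview above are how
 * cdef methods dispatch -- each extension type carries a C vtable, so
 * `self.get_item_pointer(index)` becomes a direct function-pointer call
 * rather than a Python attribute lookup.
 */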
/* "View.MemoryView":435
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":438
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 438; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":441
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":442
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":443
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_7 = NULL;
}
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
}
/*else:*/ {
/* "View.MemoryView":447
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":448
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 448; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;};
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
}
/* "View.MemoryView":449
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":444
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_12 = PyErr_ExceptionMatches(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (__pyx_t_12) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_9);
/* "View.MemoryView":445
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":435
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
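/* Explanatory note (editorial, not part of the Cython-generated output):
 * convert_item_to_object is the slow-path read used when Cython cannot
 * unpack the element type natively. It slices itemsize raw bytes and
 * round-trips them through the stdlib struct module; single-field formats
 * are unwrapped to a scalar, multi-field formats stay tuples. A minimal
 * Python sketch of the same logic, with fmt and raw standing in for
 * self.view.format and the item bytes:
 *
 *     import struct
 *     def convert_item_to_object(fmt, raw):
 *         result = struct.unpack(fmt, raw)  # always returns a tuple
 *         if len(fmt) == 1:                 # single-field format, e.g. "i"
 *             return result[0]              # unwrap to a scalar
 *         return result                     # multi-field formats stay tuples
 *
 *     convert_item_to_object("d", struct.pack("d", 3.5))   # -> 3.5
 */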
/* "View.MemoryView":451
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
Py_ssize_t __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
char *__pyx_t_10;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":454
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":459
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":460
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":462
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL;
}
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":464
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_7 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_9 = __pyx_v_bytesvalue;
__pyx_t_11 = PyBytes_AS_STRING(__pyx_t_9);
__pyx_t_12 = (__pyx_t_11 + PyBytes_GET_SIZE(__pyx_t_9));
for (__pyx_t_13 = __pyx_t_11; __pyx_t_13 < __pyx_t_12; __pyx_t_13++) {
__pyx_t_10 = __pyx_t_13;
__pyx_v_c = (__pyx_t_10[0]);
/* "View.MemoryView":465
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_7;
/* "View.MemoryView":464
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":465
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":451
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
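/* Explanatory note (editorial, not part of the Cython-generated output):
 * assign_item_from_object is the slow-path write that mirrors
 * convert_item_to_object above: the value is packed with struct.pack
 * (splatting tuples so multi-field formats work), and the resulting bytes
 * are then copied into itemp one char at a time by the loop above. A
 * minimal Python sketch of the packing step:
 *
 *     import struct
 *     def pack_item(fmt, value):
 *         if isinstance(value, tuple):       # e.g. fmt "if" <- (2, 0.5)
 *             return struct.pack(fmt, *value)
 *         return struct.pack(fmt, value)     # scalar formats like "d"
 *
 *     raw = pack_item("if", (2, 0.5))        # len(raw) == itemsize
 */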
/* "View.MemoryView":468
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
char *__pyx_t_3;
void *__pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":469
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":470
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_2 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_2;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":472
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
__pyx_v_info->shape = NULL;
}
__pyx_L3:;
/* "View.MemoryView":474
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":475
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_2 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_2;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":477
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
__pyx_v_info->strides = NULL;
}
__pyx_L4:;
/* "View.MemoryView":479
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":480
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_2 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_2;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":482
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->suboffsets = NULL;
}
__pyx_L5:;
/* "View.MemoryView":484
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":485
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_3 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_3;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":487
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
__pyx_v_info->format = NULL;
}
__pyx_L6:;
/* "View.MemoryView":489
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_4 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":490
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_5 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_5;
/* "View.MemoryView":491
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = 0
*/
__pyx_t_6 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_6;
/* "View.MemoryView":492
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = 0
* info.obj = self
*/
__pyx_t_6 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_6;
/* "View.MemoryView":493
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = 0 # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":494
* info.len = self.view.len
* info.readonly = 0
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":468
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* function exit code */
__pyx_r = 0;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
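/* Explanatory note (editorial, not part of the Cython-generated output):
 * __getbuffer__ re-exports the wrapped buffer through the PEP 3118
 * protocol. Each Py_buffer field is copied from self.view, with
 * shape/strides/suboffsets/format handed out only when the consumer asked
 * for them via the corresponding PyBUF_* flag bit, and info->obj is set to
 * self so the exporting memoryview stays alive for the duration of the
 * export. This is observable from Python, since memoryview(x) goes through
 * the same slot:
 *
 *     inner = memoryview(bytearray(24)).cast("i", (2, 3))
 *     outer = memoryview(inner)        # re-export through the buffer slot
 *     assert outer.shape == (2, 3) and outer.format == "i"
 */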
/* "View.MemoryView":501
* property T:
* @cname('__pyx_memoryview_transpose')
* def __get__(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":502
* @cname('__pyx_memoryview_transpose')
* def __get__(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":503
* def __get__(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":504
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* property base:
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":501
* property T:
* @cname('__pyx_memoryview_transpose')
* def __get__(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
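/* Explanatory note (editorial, not part of the Cython-generated output):
 * the T property first copies the memoryview object (memoryview_copy
 * duplicates the slice struct, not the data) and then transpose_memslice
 * reverses the shape/strides pairs in place, so the transpose is a
 * constant-time view over the same memory:
 *
 *     # conceptually: shape, strides = shape[::-1], strides[::-1]
 *     # a (2, 3) view with strides (12, 4) becomes (3, 2) with (4, 12)
 */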
/* "View.MemoryView":508
* property base:
* @cname('__pyx_memoryview__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":509
* @cname('__pyx_memoryview__get__base')
* def __get__(self):
* return self.obj # <<<<<<<<<<<<<<
*
* property shape:
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":508
* property base:
* @cname('__pyx_memoryview__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":513
* property shape:
* @cname('__pyx_memoryview_get_shape')
* def __get__(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":514
* @cname('__pyx_memoryview_get_shape')
* def __get__(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* property strides:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":513
* property shape:
* @cname('__pyx_memoryview_get_shape')
* def __get__(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
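/* Explanatory note (editorial, not part of the Cython-generated output):
 * the shape property materializes the C array self.view.shape[:ndim] into a
 * Python tuple, one integer per dimension. The stdlib memoryview exposes
 * the same Py_buffer field the same way:
 *
 *     mv = memoryview(bytearray(24)).cast("i", (2, 3))
 *     assert mv.shape == (2, 3)
 */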
/* "View.MemoryView":518
* property strides:
* @cname('__pyx_memoryview_get_strides')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":519
* @cname('__pyx_memoryview_get_strides')
* def __get__(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":521
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":523
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* property suboffsets:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":518
* property strides:
* @cname('__pyx_memoryview_get_strides')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
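/* Explanatory note (editorial, not part of the Cython-generated output):
 * unlike shape, strides may legitimately be absent (a producer filling a
 * PyBUF_ND request supplies shape but leaves strides NULL), hence the
 * ValueError guard before the tuple is built. For a C-contiguous 2x3 int32
 * view the expected values are byte counts, not element counts:
 *
 *     mv = memoryview(bytearray(24)).cast("i", (2, 3))
 *     assert mv.strides == (12, 4)     # row stride, element stride
 */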
/* "View.MemoryView":527
* property suboffsets:
* @cname('__pyx_memoryview_get_suboffsets')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":528
* @cname('__pyx_memoryview_get_suboffsets')
* def __get__(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":529
* def __get__(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__14, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":531
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* property ndim:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":527
* property suboffsets:
* @cname('__pyx_memoryview_get_suboffsets')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
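/* Explanatory note (editorial, not part of the Cython-generated output):
 * suboffsets are only non-NULL for PIL-style indirect buffers, where a
 * dimension is traversed by following a pointer plus offset. When the
 * producer left them NULL, the property above synthesizes the conventional
 * "no indirection" answer, one -1 per dimension:
 *
 *     # 2-D direct buffer -> (-1, -1); indirect buffers report byte offsets
 */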
/* "View.MemoryView":535
* property ndim:
* @cname('__pyx_memoryview_get_ndim')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":536
* @cname('__pyx_memoryview_get_ndim')
* def __get__(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* property itemsize:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":535
* property ndim:
* @cname('__pyx_memoryview_get_ndim')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":540
* property itemsize:
* @cname('__pyx_memoryview_get_itemsize')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":541
* @cname('__pyx_memoryview_get_itemsize')
* def __get__(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* property nbytes:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 541; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":540
* property itemsize:
* @cname('__pyx_memoryview_get_itemsize')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":545
* property nbytes:
* @cname('__pyx_memoryview_get_nbytes')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":546
* @cname('__pyx_memoryview_get_nbytes')
* def __get__(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* property size:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":545
* property nbytes:
* @cname('__pyx_memoryview_get_nbytes')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
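/* Explanatory note (editorial, not part of the Cython-generated output):
 * nbytes is derived rather than stored: the element count (the size
 * property, defined below) times view.itemsize. The size lookup goes
 * through Python attribute access on self, which is why a generic GetAttr
 * call appears above instead of a direct struct field read:
 *
 *     mv = memoryview(bytearray(24)).cast("i", (2, 3))
 *     assert mv.nbytes == 6 * 4 == 24
 */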
/* "View.MemoryView":550
* property size:
* @cname('__pyx_memoryview_get_size')
* def __get__(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":551
* @cname('__pyx_memoryview_get_size')
* def __get__(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":552
* def __get__(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":554
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":555
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":557
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":559
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":550
* property size:
* @cname('__pyx_memoryview_get_size')
* def __get__(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
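/* Explanatory note (editorial, not part of the Cython-generated output):
 * size is the product of the extents, computed once and memoized in
 * self._size (as Python ints, so no overflow for very large buffers). A
 * minimal Python sketch of the getter above:
 *
 *     def size(self):
 *         if self._size is None:
 *             result = 1
 *             for length in self.shape:
 *                 result *= length
 *             self._size = result       # cached; later accesses skip the loop
 *         return self._size
 */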
/* "View.MemoryView":561
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":562
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":563
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
}
/* "View.MemoryView":565
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":561
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
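/* Explanatory note (editorial, not part of the Cython-generated output):
 * len() reports only the leading dimension, matching the sequence view of a
 * multi-dimensional buffer; 0-dim views report 0:
 *
 *     mv = memoryview(bytearray(24)).cast("i", (2, 3))
 *     assert len(mv) == 2               # not 6: first axis only
 */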
/* "View.MemoryView":567
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":568
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":569
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":568
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":571
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":572
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":571
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":578
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":579
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
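/* Explanatory note (editorial, not part of the Cython-generated output):
 * is_c_contig (and is_f_contig below) first normalizes self into a
 * __Pyx_memviewslice and then checks the strides against a densely packed
 * C-order (or Fortran-order) layout. The stdlib memoryview exposes the
 * analogous c_contiguous/f_contiguous properties:
 *
 *     mv = memoryview(bytearray(24)).cast("i", (2, 3))
 *     assert mv.c_contiguous and not mv.f_contiguous
 */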
/* "View.MemoryView":581
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":584
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":585
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":581
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
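/* Note: copy() below returns a new, C-contiguous memoryview regardless of
 * the source layout: the PyBUF_F_CONTIGUOUS bit is masked out of the flags
 * and PyBUF_C_CONTIGUOUS is forced before the buffer is reallocated by
 * __pyx_memoryview_copy_new_contig. A hedged sketch (hypothetical names):
 *
 *     c = mv.copy()          # fresh row-major buffer
 *     c.is_c_contig()        # True, whatever mv's layout was
 */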
/* "View.MemoryView":587
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":589
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":591
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":592
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":597
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":587
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
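/* copy_fortran() below is the column-major counterpart of copy(): it masks
 * out PyBUF_C_CONTIGUOUS, forces PyBUF_F_CONTIGUOUS, and passes "fortran"
 * as the order string, so the copied buffer is laid out column-major. */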
/* "View.MemoryView":599
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":601
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":603
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":604
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":609
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":599
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
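/* memoryview_cwrapper below is the C-level constructor used internally in
 * place of calling the memoryview type from Python. It boxes the int flags
 * and the bint dtype_is_object into Python objects, calls the memoryview
 * type with (o, flags, dtype_is_object), and then attaches the
 * __Pyx_TypeInfo pointer directly, since a raw C pointer cannot travel
 * through the Python-level constructor call. */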
/* "View.MemoryView":613
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":614
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":615
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":616
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":613
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
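/* memoryview_check below is the inlined form of isinstance(o, memoryview);
 * __Pyx_TypeCheck reduces it to a C type/subtype test with no Python call. */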
/* "View.MemoryView":619
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":620
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, ((PyObject *)__pyx_memoryview_type));
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":619
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
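/* _unellipsify below normalizes an indexing object into a tuple with one
 * entry per dimension, expanding Ellipsis and padding short indices with
 * slice(None). A hedged worked example, assuming ndim == 3:
 *
 *     (Ellipsis, 0)  ->  (True, (slice(None), slice(None), 0))
 *     5              ->  (2,    (5, slice(None), slice(None)))
 *
 * The first element of the returned pair is `have_slices or nslices`, i.e.
 * truthy whenever the result still contains at least one slice. */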
/* "View.MemoryView":622
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":627
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":628
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":630
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":632
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":633
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":634
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":635
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":636
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":637
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":638
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__15);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":639
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
goto __pyx_L7;
}
/*else*/ {
/* "View.MemoryView":641
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L7:;
/* "View.MemoryView":642
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":644
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":645
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_11);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__Pyx_Raise(__pyx_t_7, 0, 0, 0);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":647
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":648
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L6:;
/* "View.MemoryView":635
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":650
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":651
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":652
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__17);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L13;
}
__pyx_L13:;
/* "View.MemoryView":654
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_7);
__pyx_t_7 = 0;
goto __pyx_L0;
/* "View.MemoryView":622
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
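/* assert_direct_dimensions below rejects buffers with indirect dimensions:
 * in the PEP 3118 suboffsets convention a value >= 0 marks a pointer
 * dereference at that axis, and callers of this helper require direct
 * (suboffset == -1) layouts, so any non-negative suboffset raises
 * ValueError("Indirect dimensions not supported"). */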
/* "View.MemoryView":656
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":657
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":658
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":659
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
/* "View.MemoryView":656
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
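/* memview_slice below drives the per-dimension slicing of a memoryview.
 * For each entry of `indices` (as produced by _unellipsify) it
 * distinguishes three cases: an integer index drops the dimension
 * (new_ndim is not advanced), None inserts a new dimension of extent 1
 * (shape 1, stride 0, suboffset -1), and a slice object is decomposed into
 * start/stop/step plus have_* flags and handed to slice_memviewslice. The
 * result is rewrapped via memoryview_fromslice, preserving the
 * to_object_func/to_dtype_func converters when the source was itself a
 * _memoryviewslice. */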
/* "View.MemoryView":666
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":667
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":674
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
/* "View.MemoryView":678
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 678; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/* "View.MemoryView":680
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":681
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 681; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":682
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":684
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":685
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":691
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":692
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":697
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":698
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":702
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 702; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":703
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":707
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 707; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":704
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 704; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
/* "View.MemoryView":710
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":711
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":712
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":713
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1;
/* "View.MemoryView":714
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":716
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":717
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":718
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":720
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 720; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":721
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":722
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":724
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 724; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":730
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":702
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":732
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":733
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":734
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":735
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":733
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":738
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":739
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":738
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 738; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":666
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
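/* slice_memviewslice below applies Python slice semantics to one axis of
 * the raw slice struct: it clamps start/stop into range (mirroring
 * PySlice_GetIndicesEx), defaults step to 1, and computes the new extent
 * as ceil((stop - start) / step) via truncating division plus a remainder
 * correction. A hedged numeric check, assuming shape == 10:
 *
 *     [2:8:3]  -> start 2, stop 8,  step 3  -> new_shape 2 (indices 2, 5)
 *     [::-1]   -> start 9, stop -1, step -1 -> new_shape 10
 *
 * The data pointer (or the pending suboffset, if one is outstanding) is
 * then advanced by start * stride, and the output axis's shape, stride,
 * and suboffset are written in place. */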
/* "View.MemoryView":763
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":783
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":785
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":786
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":787
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":788
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":791
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":793
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":794
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
/* "View.MemoryView":797
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":798
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":799
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":800
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":801
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
goto __pyx_L13;
}
__pyx_L13:;
goto __pyx_L12;
}
/* "View.MemoryView":802
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":803
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":804
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
goto __pyx_L14;
}
/*else*/ {
/* "View.MemoryView":806
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
goto __pyx_L12;
}
__pyx_L12:;
goto __pyx_L11;
}
/*else*/ {
/* "View.MemoryView":808
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":809
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
goto __pyx_L15;
}
/*else*/ {
/* "View.MemoryView":811
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":813
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":814
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":815
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":816
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":817
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
goto __pyx_L18;
}
__pyx_L18:;
goto __pyx_L17;
}
/* "View.MemoryView":818
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":819
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
goto __pyx_L17;
}
__pyx_L17:;
goto __pyx_L16;
}
/*else*/ {
/* "View.MemoryView":821
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":822
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1;
goto __pyx_L19;
}
/*else*/ {
/* "View.MemoryView":824
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":826
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":827
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
goto __pyx_L20;
}
__pyx_L20:;
/* "View.MemoryView":831
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":833
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":834
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
goto __pyx_L21;
}
__pyx_L21:;
/* "View.MemoryView":836
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":837
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
goto __pyx_L22;
}
__pyx_L22:;
/* "View.MemoryView":840
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":841
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":842
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":845
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":846
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
goto __pyx_L23;
}
/*else*/ {
/* "View.MemoryView":848
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":850
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":851
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":852
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
goto __pyx_L26;
}
/*else*/ {
/* "View.MemoryView":855
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 855; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L26:;
goto __pyx_L25;
}
/*else*/ {
/* "View.MemoryView":858
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
goto __pyx_L24;
}
__pyx_L24:;
/* "View.MemoryView":860
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":763
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
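/* The arithmetic at the end of slice_memviewslice mirrors Python's slice
   semantics under cdivision(True): C division truncates toward zero, so one
   is added back whenever a remainder exists, and a negative result (an empty
   slice) clamps to zero. A minimal standalone sketch of that extent
   computation (hypothetical helper, not part of the generated API), assuming
   start/stop/step have already been normalized as above:

       static Py_ssize_t sliced_extent(Py_ssize_t start, Py_ssize_t stop,
                                       Py_ssize_t step)
       {
           Py_ssize_t n = (stop - start) / step;   // truncating C division
           if ((stop - start) - step * n != 0)     // nonzero remainder:
               n += 1;                             //   round up to cover it
           return n < 0 ? 0 : n;                   // empty slice clamps to 0
       }

   For example, start=0, stop=5, step=2 gives n=2 with remainder 1, so the
   extent is 3, matching len(range(0, 5, 2)) in Python. */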
/* "View.MemoryView":866
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":868
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1;
/* "View.MemoryView":869
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":872
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":873
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 873; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":874
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":876
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":877
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":878
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":879
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
goto __pyx_L4;
}
__pyx_L4:;
}
__pyx_L3:;
/* "View.MemoryView":881
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":882
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":883
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":884
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 884; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
goto __pyx_L5;
}
__pyx_L5:;
/* "View.MemoryView":886
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":887
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 887; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":889
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":890
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":891
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
goto __pyx_L8;
}
__pyx_L8:;
/* "View.MemoryView":893
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":866
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
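/* pybuffer_index above follows the PEP 3118 buffer model: after wrapping a
   negative index and bounds-checking it against the shape, the item address
   is bufp + index * stride, with one extra dereference when the dimension is
   indirect (suboffset >= 0). A minimal sketch of that final step
   (hypothetical helper, not part of the generated API):

       static char *item_pointer(char *bufp, Py_ssize_t index,
                                 Py_ssize_t stride, Py_ssize_t suboffset)
       {
           char *p = bufp + index * stride;
           if (suboffset >= 0)              // indirect: p holds a pointer
               p = (*(char **) p) + suboffset;
           return p;
       }
*/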
/* "View.MemoryView":899
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":900
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":902
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":903
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":907
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":908
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":909
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_4 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_4;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":910
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_5 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_4 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_4;
/* "View.MemoryView":912
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L6_bool_binop_done:;
if (__pyx_t_6) {
/* "View.MemoryView":913
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 913; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
}
/* "View.MemoryView":915
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":899
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
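/* transpose_memslice above transposes in place simply by reversing the shape
   and strides arrays, which needs only ndim/2 swaps; the data pointer never
   moves. Indirect dimensions (suboffset >= 0) are rejected because
   reordering them would change which level of indirection each pointer hop
   corresponds to. A minimal sketch of the reversal (hypothetical helper,
   not part of the generated API):

       static void reverse_extents(Py_ssize_t *a, int ndim)
       {
           int i;
           for (i = 0; i < ndim / 2; i++) {
               Py_ssize_t tmp = a[i];
               a[i] = a[ndim - 1 - i];
               a[ndim - 1 - i] = tmp;
           }
       }
*/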
/* "View.MemoryView":932
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":933
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":932
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":935
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":936
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":937
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 937; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":939
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 939; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":935
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":941
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":942
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":943
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 943; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":945
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* property base:
*/
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 945; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":941
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
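/* convert_item_to_object and assign_item_from_object above form a read/write
   pair: each first tries a dtype-specialized C function pointer cached on
   the slice (to_object_func / to_dtype_func) and falls back to the generic
   memoryview implementation when that pointer is NULL. A minimal sketch of
   the dispatch pattern (hypothetical names, not part of the generated API):

       typedef PyObject *(*item_reader)(char *);

       static PyObject *read_item(item_reader fast, item_reader generic,
                                  char *itemp)
       {
           return (fast != NULL) ? fast(itemp) : generic(itemp);
       }
*/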
/* "View.MemoryView":949
* property base:
* @cname('__pyx_memoryviewslice__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":950
* @cname('__pyx_memoryviewslice__get__base')
* def __get__(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":949
* property base:
* @cname('__pyx_memoryviewslice__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":956
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":964
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":965
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
}
/* "View.MemoryView":970
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryviewslice_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 970; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":972
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":973
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":975
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 975; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":976
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":978
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":979
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":980
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":981
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":982
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* result.flags = PyBUF_RECORDS
*/
Py_INCREF(Py_None);
/* "View.MemoryView":984
* Py_INCREF(Py_None)
*
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":986
* result.flags = PyBUF_RECORDS
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":987
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":990
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":991
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":992
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":993
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":994
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L5_break;
}
}
__pyx_L5_break:;
/* "View.MemoryView":996
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":997
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 997; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":998
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 998; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1000
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1001
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1003
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":956
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
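/* memoryview_fromslice above rewraps a C-level __Pyx_memviewslice as a
   Python-visible _memoryviewslice: the parent's Py_buffer is copied
   wholesale, then buf, ndim, shape, and strides are repointed at the slice's
   own data and arrays, and view.len is recomputed as itemsize times the
   product of the sliced extents. view.suboffsets stays NULL unless some
   dimension is actually indirect; a minimal sketch of that scan
   (hypothetical helper, not part of the generated API):

       static int has_indirect_dim(const Py_ssize_t *suboffsets, int ndim)
       {
           int i;
           for (i = 0; i < ndim; i++)
               if (suboffsets[i] >= 0)   // >= 0 marks an indirect dimension
                   return 1;
           return 0;
       }
*/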
/* "View.MemoryView":1006
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1009
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1010
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1010; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1011
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":1013
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1014
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1006
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice):
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
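/* get_slice_from_memview above avoids a copy when it can: a _memoryviewslice
   already owns a __Pyx_memviewslice (from_slice) whose address is returned
   directly, while a plain memoryview has its Py_buffer fields flattened into
   the caller-supplied struct. A sketch of the calling convention (mv is a
   hypothetical struct __pyx_memoryview_obj *; the scratch slot must outlive
   the returned pointer, since it may be what s points at):

       __Pyx_memviewslice scratch;
       __Pyx_memviewslice *s =
           __pyx_memoryview_get_slice_from_memoryview(mv, &scratch);
*/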
/* "View.MemoryView":1017
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1021
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1022
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1023
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1025
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1026
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1028
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_dim = __pyx_t_3;
/* "View.MemoryView":1029
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1030
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1031
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_4 = -1;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4;
}
/* "View.MemoryView":1017
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
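/* slice_copy above flattens a memoryview's Py_buffer into the packed
   __Pyx_memviewslice layout used by the nogil copy routines. A missing
   suboffsets array (NULL, i.e. a fully direct buffer) becomes -1 in every
   dimension, the per-dimension "no indirection" sentinel. A minimal sketch
   of that normalization (hypothetical helper, not part of the generated
   API):

       static void copy_suboffsets(Py_ssize_t *dst, const Py_ssize_t *src,
                                   int ndim)
       {
           int i;
           for (i = 0; i < ndim; i++)
               dst[i] = (src != NULL) ? src[i] : -1;  // -1 == direct
       }
*/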
/* "View.MemoryView":1034
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1037
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1038
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1038; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1034
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1041
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1048
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1049
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1050
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":1052
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1053
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1055
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1057
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1055; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1041
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1063
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1064
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1065
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":1067
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1063
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1070
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1075
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1076
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1078
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1079
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1080
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1081
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
}
}
__pyx_L4_break:;
/* "View.MemoryView":1083
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1084
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1085
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1086
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
}
}
__pyx_L7_break:;
/* "View.MemoryView":1088
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1089
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":1091
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1070
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
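/* get_best_order above uses a simple heuristic: c_stride is the stride of
   the innermost dimension with extent > 1 (scanning from the last axis) and
   f_stride that of the outermost such dimension; whichever is smaller in
   magnitude indicates the layout, with ties (including all-singleton shapes,
   where both strides remain 0) resolving to 'C'. A usage sketch under those
   assumptions (hypothetical 3 x 4 slice of doubles; only the fields the
   function reads are filled in):

       __Pyx_memviewslice s;
       s.shape[0] = 3;  s.strides[0] = 4 * sizeof(double);
       s.shape[1] = 4;  s.strides[1] = sizeof(double);
       // innermost stride (8) <= outermost stride (32), so 'C':
       char order = __pyx_get_best_slice_order(&s, 2);

   Swapping the two strides would make the same call return 'F'. */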
/* "View.MemoryView":1094
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
/* "View.MemoryView":1101
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1102
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1103
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1104
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1106
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1107
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1108
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":1109
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent));
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":1111
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
__pyx_t_4 = __pyx_v_dst_extent;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1112
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
/* "View.MemoryView":1113
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1114
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":1116
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
__pyx_t_4 = __pyx_v_dst_extent;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1117
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1121
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1122
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1094
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
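/* NOTE (reviewer): the next helper is the thin public entry point for the
   recursive element copy above -- it simply unpacks the two memoryview
   slices into the raw data/strides/shape arrays that
   _copy_strided_to_strided expects. */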
/* "View.MemoryView":1124
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1127
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1124
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
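/* NOTE (reviewer): slice_get_size computes the number of bytes the slice's
   elements would occupy if packed contiguously: itemsize * prod(shape[i]).
   For example, a 2x3 slice of 8-byte items yields 8 * 2 * 3 = 48 bytes. */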
/* "View.MemoryView":1131
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1134
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
* cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1136
* cdef Py_ssize_t size = src.memview.view.itemsize
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* size *= src.shape[i]
*
*/
__pyx_t_2 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1137
*
* for i in range(ndim):
* size *= src.shape[i] # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i]));
}
/* "View.MemoryView":1139
* size *= src.shape[i]
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1131
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
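/* NOTE (reviewer): fill_contig_strides_array writes the strides a packed
   slice of the given shape would have in C ('C') or Fortran ('F') order,
   and returns itemsize * prod(shape), the total byte size. Below is a
   reviewer-added sketch, never compiled (#if 0); the literal values are
   assumptions chosen for illustration only. */
#if 0
Py_ssize_t shape[2] = {2, 3};
Py_ssize_t strides[2];
/* C order fills strides = {24, 8}; each call returns 48 (total bytes). */
__pyx_fill_contig_strides_array(shape, strides, 8, 2, 'C');
/* Fortran order fills strides = {8, 16}. */
__pyx_fill_contig_strides_array(shape, strides, 8, 2, 'F');
#endif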
/* "View.MemoryView":1142
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1151
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1152
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_idx = __pyx_t_3;
/* "View.MemoryView":1153
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1154
* for idx in range(ndim):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":1156
* stride = stride * shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride = stride * shape[idx]
*/
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1157
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride = stride * shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1158
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride = stride * shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1160
* stride = stride * shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1142
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
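/* NOTE (reviewer): copy_data_to_temp malloc()s a contiguous scratch buffer
   big enough for the whole slice, mirrors src's shape into *tmpslice with
   all suboffsets set to -1 (no indirect dimensions), fills contiguous
   strides in the requested order, and zeroes the stride of any extent-1
   dimension -- presumably so broadcast dimensions keep broadcasting after
   the copy. It then does a single memcpy when src is already contiguous in
   that order, or a full strided copy otherwise. Returns the buffer (NULL on
   error); the caller owns the memory and must free() it. */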
/* "View.MemoryView":1163
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1174
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1175
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1177
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1178
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1179
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1179; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":1182
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1183
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1184
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1185
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1186
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1;
}
/* "View.MemoryView":1188
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order);
/* "View.MemoryView":1192
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":1193
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1194
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src, order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
goto __pyx_L8;
}
__pyx_L8:;
}
/* "View.MemoryView":1196
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1197
*
* if slice_is_contig(src, order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size);
goto __pyx_L9;
}
/*else*/ {
/* "View.MemoryView":1199
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1201
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1163
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
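/* NOTE (reviewer): the three _err* helpers below exist so that nogil copy
   code can raise Python exceptions: each re-acquires the GIL, raises, and
   returns -1 ("except -1 with gil" in the Cython source). _err_extents
   reports a per-dimension shape mismatch between the two slices. */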
/* "View.MemoryView":1206
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1209
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1209; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1208
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":1206
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
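/* NOTE (reviewer): _err_dim raises error(msg % dim) after decoding the
   C message string as ASCII; used below for "Dimension %d is not direct". */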
/* "View.MemoryView":1212
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1213
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
if (!__pyx_t_2) {
__pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_GOTREF(__pyx_t_1);
} else {
__pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = NULL;
PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1213; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":1212
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
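/* NOTE (reviewer): _err raises error(msg) when a message is supplied and
   re-raises the bare error object otherwise -- the MemoryError path in
   copy_data_to_temp above passes msg == NULL. */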
/* "View.MemoryView":1216
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1217
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1218
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
if (!__pyx_t_5) {
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_GOTREF(__pyx_t_2);
} else {
__pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL;
PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/*else*/ {
/* "View.MemoryView":1220
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":1216
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
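/* NOTE (reviewer): memoryview_copy_contents is the top-level slice-to-slice
   assignment. Reading from the generated code, the sequence is roughly:
     1. pad the lower-dimensional side with leading length-1 dims
        (broadcast_leading) so both slices have ndim = max(src_ndim, dst_ndim);
     2. per dimension: let a src extent of 1 broadcast (stride set to 0),
        raise via _err_extents on any other mismatch, and reject indirect
        dimensions (suboffset >= 0);
     3. if the slices' memory overlaps, copy src into a contiguous temp
        buffer (copy_data_to_temp) and retarget src at it;
     4. if not broadcasting and both slices are C-contiguous (or both
        F-contiguous), do one memcpy, bracketed by refcount fixups for
        object dtypes;
     5. otherwise, if both are best traversed in Fortran order, transpose
        both so the recursive strided copy walks memory cache-friendly,
        then copy element by element.
   Returns 0 on success, -1 on error. The temp buffer is freed on both
   success paths; the error path appears not to free tmpdata in this
   generated version. */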
/* "View.MemoryView":1223
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1231
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1232
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1234
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1235
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1236
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1239
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1240
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
goto __pyx_L3;
}
/* "View.MemoryView":1241
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":1244
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1246
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1247
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1248
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1249
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1250
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
goto __pyx_L7;
}
/*else*/ {
/* "View.MemoryView":1252
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
__pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1252; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L7:;
goto __pyx_L6;
}
__pyx_L6:;
/* "View.MemoryView":1254
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1255
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1255; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
}
/* "View.MemoryView":1257
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(&src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1259
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1260
*
* if not slice_is_contig(&src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
goto __pyx_L10;
}
__pyx_L10:;
/* "View.MemoryView":1262
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1262; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_tmpdata = __pyx_t_6;
/* "View.MemoryView":1263
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
goto __pyx_L9;
}
__pyx_L9:;
/* "View.MemoryView":1265
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1268
*
*
* if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(&dst, 'C', ndim)
* elif slice_is_contig(&src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1269
*
* if slice_is_contig(&src, 'C', ndim):
* direct_copy = slice_is_contig(&dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(&src, 'F', ndim):
* direct_copy = slice_is_contig(&dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim);
goto __pyx_L12;
}
/* "View.MemoryView":1270
* if slice_is_contig(&src, 'C', ndim):
* direct_copy = slice_is_contig(&dst, 'C', ndim)
* elif slice_is_contig(&src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(&dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1271
* direct_copy = slice_is_contig(&dst, 'C', ndim)
* elif slice_is_contig(&src, 'F', ndim):
* direct_copy = slice_is_contig(&dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim);
goto __pyx_L12;
}
__pyx_L12:;
/* "View.MemoryView":1273
* direct_copy = slice_is_contig(&dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1275
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1276
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim));
/* "View.MemoryView":1277
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1278
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1279
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
}
goto __pyx_L11;
}
__pyx_L11:;
/* "View.MemoryView":1281
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_7 = (__pyx_t_2 != 0);
if (__pyx_t_7) {
/* "View.MemoryView":1284
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":1285
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1285; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L14;
}
__pyx_L14:;
/* "View.MemoryView":1287
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1288
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1289
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1291
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1292
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1223
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
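/* NOTE (reviewer): broadcast_leading shifts the existing dimensions of
   *mslice to the right by offset = ndim_other - ndim and fills the new
   leading dimensions with extent 1, stride strides[0], suboffset -1.
   E.g. promoting a 1-D slice of shape {5} to 3-D yields shape {1, 1, 5}. */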
/* "View.MemoryView":1295
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
/* "View.MemoryView":1299
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1301
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1302
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1303
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1304
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1306
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "View.MemoryView":1307
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1308
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1309
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1;
}
/* "View.MemoryView":1295
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
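/* NOTE (reviewer): refcount_copying is a no-op for plain dtypes; for object
   dtypes it walks the destination slice and INCREF/DECREFs every element.
   Callers invoke it with inc=False before overwriting a slice (dropping the
   old references) and inc=True afterwards (claiming the copied pointers). */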
/* "View.MemoryView":1317
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1321
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1322
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":1317
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
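/* NOTE (reviewer): the _with_gil variant below only re-acquires the GIL --
   needed because Py_INCREF/Py_DECREF may run arbitrary Python code -- and
   forwards to the recursive worker. */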
/* "View.MemoryView":1326
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1329
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1326
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
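/* NOTE (reviewer): recursive worker -- at ndim == 1 it treats each element
   as a PyObject* and adjusts its refcount; otherwise it recurses one
   dimension deeper, advancing data by strides[0] between iterations. */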
/* "View.MemoryView":1332
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
int __pyx_t_3;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1336
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "View.MemoryView":1337
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_3 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":1338
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_3 = (__pyx_v_inc != 0);
if (__pyx_t_3) {
/* "View.MemoryView":1339
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":1341
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":1343
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1346
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1332
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
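/* NOTE (reviewer): slice_assign_scalar broadcasts a single item into every
   element of dst, bracketing the raw copy with the same refcount fixups
   used by memoryview_copy_contents. */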
/* "View.MemoryView":1352
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1355
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1356
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1358
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1352
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
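/* NOTE (reviewer): _slice_assign_scalar mirrors _copy_strided_to_strided:
   at ndim == 1 it memcpy()s itemsize bytes from item into each element,
   stepping by strides[0]; for higher ndim it recurses once per outer
   index. */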
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
/* "View.MemoryView":1366
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1367
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1369
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1370
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1371
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize);
/* "View.MemoryView":1372
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":1374
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
__pyx_t_2 = __pyx_v_extent;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1375
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1377
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
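/* NOTE (reviewer): everything from here on is boilerplate wiring for the
   module's three internal extension types (array, Enum, memoryview):
   tp_new/tp_dealloc, sequence/mapping/buffer slot tables, and the
   PyTypeObject definitions themselves. The temporary ++Py_REFCNT /
   --Py_REFCNT around the __dealloc__ calls is the generated trick that
   keeps the object alive while its Python-level __dealloc__ runs; it
   relies on Py_REFCNT being an lvalue, which newer CPython headers no
   longer accept, so this file targets the older CPythons of its era. */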
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) {
Py_DECREF(o); o = 0;
}
return o;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return get_memview(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
0, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
0, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.util.choleskies_cython.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
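/* NOTE (reviewer): the Enum type backs the sentinel objects (e.g.
   'strided', 'indirect') used by the memoryview machinery; it participates
   in GC, with traverse/clear over its single 'name' reference. */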
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.util.choleskies_cython.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
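/* NOTE (reviewer): the memoryview type proper -- unlike array and Enum it
   carries a vtable of its cdef methods (set in tp_new below), and its
   traverse/clear also visit view.obj, the buffer-owning object held by the
   underlying Py_buffer. */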
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) {
Py_DECREF(o); o = 0;
}
return o;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
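/* Sequence-protocol shim: sq_item receives a raw C index, so it is boxed into
   a Python int and forwarded to the mapping subscript, keeping all indexing
   logic in __getitem__. Item deletion (v == NULL) is rejected explicitly. */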
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_transpose(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview__get__base(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_shape(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_strides(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_suboffsets(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_ndim(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_itemsize(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_nbytes(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_size(o);
}
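/* Python-visible surface of the memoryview wrapper: the methods is_c_contig,
   is_f_contig, copy and copy_fortran, plus the read-only properties T, base,
   shape, strides, suboffsets, ndim, itemsize, nbytes and size, each routed to
   the corresponding getter defined above. */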
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.util.choleskies_cython.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
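/* _memoryviewslice extends the memoryview type (tp_base is assigned during
   module init below) with a from_object back-reference and an owned C-level
   slice; its tp_new therefore chains to __pyx_tp_new_memoryview first, and
   its dealloc/traverse/clear delegate back to the base implementations. */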
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_memoryviewslice___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryviewslice__get__base(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"GPy.util.choleskies_cython._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
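/* Module-level tables: the method table is empty because every exported
   function is created dynamically in the init code below, and on Python 3 a
   PyModuleDef describes the module for PyInit_choleskies_cython. */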
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
PyModuleDef_HEAD_INIT,
#endif
"choleskies_cython",
0, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
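/* Interned-string table. Each entry names the global slot to fill, the UTF-8
   bytes and their length, followed by flags that (in this Cython version, as
   far as the layout shows) distinguish bytes from unicode from str and mark
   identifiers for interning; __Pyx_InitStrings walks the table once at import. */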
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0},
{&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0},
{&__pyx_n_s_GPy_util_choleskies_cython, __pyx_k_GPy_util_choleskies_cython, sizeof(__pyx_k_GPy_util_choleskies_cython), 0, 0, 1, 1},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_L, __pyx_k_L, sizeof(__pyx_k_L), 0, 0, 1, 1},
{&__pyx_n_s_L_cont, __pyx_k_L_cont, sizeof(__pyx_k_L_cont), 0, 0, 1, 1},
{&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1},
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1},
{&__pyx_n_s_ascontiguousarray, __pyx_k_ascontiguousarray, sizeof(__pyx_k_ascontiguousarray), 0, 0, 1, 1},
{&__pyx_n_s_backprop_gradient, __pyx_k_backprop_gradient, sizeof(__pyx_k_backprop_gradient), 0, 0, 1, 1},
{&__pyx_n_s_backprop_gradient_par, __pyx_k_backprop_gradient_par, sizeof(__pyx_k_backprop_gradient_par), 0, 0, 1, 1},
{&__pyx_n_s_backprop_gradient_par_c, __pyx_k_backprop_gradient_par_c, sizeof(__pyx_k_backprop_gradient_par_c), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1},
{&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1},
{&__pyx_n_s_dL, __pyx_k_dL, sizeof(__pyx_k_dL), 0, 0, 1, 1},
{&__pyx_n_s_dL_dK, __pyx_k_dL_dK, sizeof(__pyx_k_dL_dK), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_flat, __pyx_k_flat, sizeof(__pyx_k_flat), 0, 0, 1, 1},
{&__pyx_n_s_flat_to_triang, __pyx_k_flat_to_triang, sizeof(__pyx_k_flat_to_triang), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_k_home_james_work_GPy_GPy_util_ch, sizeof(__pyx_k_home_james_work_GPy_GPy_util_ch), 0, 0, 1, 0},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1},
{&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1},
{&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0},
{&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_ret, __pyx_k_ret, sizeof(__pyx_k_ret), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_triang_to_flat, __pyx_k_triang_to_flat, sizeof(__pyx_k_triang_to_flat), 0, 0, 1, 1},
{&__pyx_n_s_tril, __pyx_k_tril, sizeof(__pyx_k_tril), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1},
{&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
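/* Builtins are resolved once and cached. Note the Python-version shim: on
   Python 3 the name xrange is deliberately bound to the builtin range so the
   .pyx source can use xrange on both major versions. */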
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION >= 3
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 788; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
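/* Cached constants: the argument tuples for every raise site, the reusable
   slice(None) objects, and one code object per module-level def are all built
   a single time here so later code only borrows references. */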
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 260; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if (end - f) - <int>(new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 802; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 806; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":127
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":130
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if isinstance(format, unicode):
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":142
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":170
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":186
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":445
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":521
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":529
* def __get__(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__14 = PyTuple_New(1); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_INCREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__14, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":638
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_slice__15);
__Pyx_GIVEREF(__pyx_slice__15);
/* "View.MemoryView":641
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
__pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
/* "View.MemoryView":652
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_slice__17 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__17)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_slice__17);
__Pyx_GIVEREF(__pyx_slice__17);
/* "View.MemoryView":659
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 659; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "GPy/util/choleskies_cython.pyx":12
* cimport scipy.linalg.cython_blas as cblas
*
* def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<<
* """take a matrix N x D and return a D X M x M array where
*
*/
__pyx_tuple__19 = PyTuple_Pack(9, __pyx_n_s_flat, __pyx_n_s_M, __pyx_n_s_D, __pyx_n_s_N, __pyx_n_s_count, __pyx_n_s_ret, __pyx_n_s_d, __pyx_n_s_m, __pyx_n_s_mm); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
__pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(2, 0, 9, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_flat_to_triang, 12, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
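/* Usage sketch for flat_to_triang (hypothetical values; assumes, per GPy's
   choleskies utilities, that the N = M*(M+1)/2 rows of `flat` hold the packed
   lower-triangle entries of each of D matrices):

       import numpy as np
       from GPy.util import choleskies_cython
       flat = np.arange(6.0).reshape(6, 1)            # N=6, D=1  ->  M=3
       L = choleskies_cython.flat_to_triang(flat, 3)  # shape (1, 3, 3)
*/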
/* "GPy/util/choleskies_cython.pyx":33
* return ret
*
* def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<<
* cdef int D = L.shape[0]
* cdef int M = L.shape[1]
*/
__pyx_tuple__21 = PyTuple_Pack(10, __pyx_n_s_L, __pyx_n_s_L, __pyx_n_s_D, __pyx_n_s_M, __pyx_n_s_N, __pyx_n_s_count, __pyx_n_s_flat, __pyx_n_s_d, __pyx_n_s_m, __pyx_n_s_mm); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
__pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(1, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_triang_to_flat, 33, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "GPy/util/choleskies_cython.pyx":49
* return flat
*
* def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
__pyx_tuple__23 = PyTuple_Pack(7, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_N, __pyx_n_s_k, __pyx_n_s_j, __pyx_n_s_i); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
__pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_backprop_gradient, 49, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
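/* Note on backprop_gradient (inferred from the signature and GPy's choleskies
   module, not stated by the generated code): given dF/dL for a scalar F and a
   lower Cholesky factor L, it returns dF/dK for K = L L^T, i.e. reverse-mode
   differentiation through the Cholesky decomposition. The np.tril(dL) in the
   quoted source both copies the input and zeroes any strictly-upper entries. */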
/* "GPy/util/choleskies_cython.pyx":65
* return dL_dK
*
* def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<<
* cdef double[:,::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
__pyx_tuple__25 = PyTuple_Pack(7, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_N, __pyx_n_s_k, __pyx_n_s_j, __pyx_n_s_i); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_backprop_gradient_par, 65, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "GPy/util/choleskies_cython.pyx":108
* dL[k, k] /= (2.0 * L[k, k])
*
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
*/
__pyx_tuple__27 = PyTuple_Pack(5, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_L_cont, __pyx_n_s_N); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__27);
__Pyx_GIVEREF(__pyx_tuple__27);
__pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_james_work_GPy_GPy_util_ch, __pyx_n_s_backprop_gradient_par_c, 108, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":276
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__29)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":277
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__30)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":278
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__31)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
/* "View.MemoryView":281
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__32)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__32);
__Pyx_GIVEREF(__pyx_tuple__32);
/* "View.MemoryView":282
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__33 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__33)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__33);
__Pyx_GIVEREF(__pyx_tuple__33);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
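/* Global init: bring up the threading machinery (PyEval_InitThreads on builds
   with WITH_THREAD), intern the string table, and pre-create the integer
   constants 0, 1 and -1 used throughout the module. */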
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
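/* Module entry point: initcholeskies_cython on Python 2 (returns void),
   PyInit_choleskies_cython on Python 3 (returns the module object). The body
   creates the module, readies the array/Enum/memoryview types, imports the
   numpy C-API types and the BLAS functions, then executes the module-level
   statements of the .pyx file in source order. */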
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initcholeskies_cython(void); /*proto*/
PyMODINIT_FUNC initcholeskies_cython(void)
#else
PyMODINIT_FUNC PyInit_choleskies_cython(void); /*proto*/
PyMODINIT_FUNC PyInit_choleskies_cython(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_choleskies_cython(void)", 0);
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __Pyx_CyFunction_USED
if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("choleskies_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
if (__pyx_module_is_main_GPy__util__choleskies_cython) {
if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!PyDict_GetItemString(modules, "GPy.util.choleskies_cython")) {
if (unlikely(PyDict_SetItemString(modules, "GPy.util.choleskies_cython", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_array.tp_print = 0;
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 269; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_MemviewEnum.tp_print = 0;
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryview.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 921; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
/*--- Type import code ---*/
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type",
#if CYTHON_COMPILING_IN_PYPY
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[3]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 168; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 172; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
__pyx_t_1 = __Pyx_ImportModule("scipy.linalg.cython_blas"); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_ImportFunction(__pyx_t_1, "ddot", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_ddot, "__pyx_t_5scipy_6linalg_11cython_blas_d (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_ImportFunction(__pyx_t_1, "dscal", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_dscal, "void (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_ImportFunction(__pyx_t_1, "dsymv", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_dsymv, "void (char *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
Py_DECREF(__pyx_t_1); __pyx_t_1 = 0;
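/* The cimported BLAS kernels ddot, dscal and dsymv were just fetched from
   scipy.linalg.cython_blas via signature-checked function import; everything
   past this point mirrors the module body of the .pyx file: import numpy,
   then bind each def as a module attribute. */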
/*--- Execution code ---*/
/* "GPy/util/choleskies_cython.pyx":7
* # Copyright James Hensman and Alan Saul 2015
*
* import numpy as np # <<<<<<<<<<<<<<
* from cython.parallel import prange, parallel
* cimport numpy as np
*/
__pyx_t_2 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "GPy/util/choleskies_cython.pyx":12
* cimport scipy.linalg.cython_blas as cblas
*
* def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<<
* """take a matrix N x D and return a D X M x M array where
*
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_1flat_to_triang, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_flat_to_triang, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "GPy/util/choleskies_cython.pyx":33
* return ret
*
* def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<<
* cdef int D = L.shape[0]
* cdef int M = L.shape[1]
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_3triang_to_flat, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_triang_to_flat, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 33; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "GPy/util/choleskies_cython.pyx":49
* return flat
*
* def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_5backprop_gradient, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "GPy/util/choleskies_cython.pyx":65
* return dL_dK
*
* def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<<
* cdef double[:,::1] dL_dK = np.tril(dL)
* cdef int N = L.shape[0]
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_7backprop_gradient_par, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient_par, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "GPy/util/choleskies_cython.pyx":108
* dL[k, k] /= (2.0 * L[k, k])
*
* def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<<
* cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig
* cdef double[:, ::1] L_cont = np.ascontiguousarray(L)
*/
__pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient_par_c, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "GPy/util/choleskies_cython.pyx":1
* #cython: wraparound=False # <<<<<<<<<<<<<<
* #cython: boundscheck=False
* #cython: nonecheck=False
*/
__pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":203
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":276
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":277
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":278
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":281
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":282
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__33, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":496
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":952
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init GPy.util.choleskies_cython", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init GPy.util.choleskies_cython");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* --- Runtime support code --- */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule((char *)modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
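/* __Pyx_GetBuiltinName: look 'name' up on the builtins module (__pyx_b) and
   raise NameError with CPython's wording if it is absent.  This is the
   fallback used when a global is not found in the module dict. */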
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
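/* __Pyx_RaiseArgtupleInvalid: raise a TypeError matching CPython's wording
   for a wrong number of positional arguments, e.g.
     "foo() takes exactly 2 positional arguments (3 given)". */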
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON
result = PyDict_GetItem(__pyx_d, name);
if (likely(result)) {
Py_INCREF(result);
} else {
#else
result = PyObject_GetItem(__pyx_d, name);
if (!result) {
PyErr_Clear();
#endif
result = __Pyx_GetBuiltinName(name);
}
return result;
}
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) {
#else
if (likely(PyCFunction_Check(func))) {
#endif
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject* args = PyTuple_Pack(1, arg);
return (likely(args)) ? __Pyx_PyObject_Call(func, args, NULL) : NULL;
}
#endif
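/* __Pyx_IsLittleEndian: store 1 in an unsigned int and inspect its first
   byte.  On a little-endian machine the least significant byte comes first,
   so that byte is 1 (true); on a big-endian machine it is 0. */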
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
unsigned int n = 1;
return *(unsigned char*)(&n) != 0;
}
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
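/* __Pyx_BufFmt_ParseNumber: read a decimal repeat count from the format
   string and advance *ts past it, so "12d" yields 12 and leaves *ts at 'd'.
   Returns -1 if *ts does not start with a digit. */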
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
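/* Alignment probes: in a struct { char c; T x; } the compiler inserts exactly
   the padding needed to align x, so
     sizeof(__Pyx_st_short) - sizeof(short)
   recovers the alignment requirement of T without needing C11 _Alignof.  On
   a typical LP64 ABI this gives 2 for short, 4 for int, and 8 for double. */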
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number;
int ndim = ctx->head->field->type->ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace between dimensions */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
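/* __Pyx_BufFmt_CheckString: walk a PEP 3118 buffer format string (e.g.
   "<2d", "(2,2)f", or "T{d:x:l:y:}") one code at a time, accumulating repeat
   counts and byte offsets, and compare the result against the __Pyx_TypeInfo
   tree describing the expected dtype.  Returns the advanced pointer on
   success, or NULL with a ValueError set on any mismatch. */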
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_IsLittleEndian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
ctx->enc_packmode == ctx->new_packmode) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
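/* __Pyx_GetBufferAndValidate: acquire a Py_buffer from 'obj' and verify its
   ndim, its format string (unless an explicit cast was requested), and its
   itemsize against the expected dtype.  On any failure the buffer struct is
   zeroed, so a later __Pyx_SafeReleaseBuffer is a harmless no-op. */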
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
if (obj == Py_None || obj == NULL) {
__Pyx_ZeroBuffer(buf);
return 0;
}
buf->buf = NULL;
if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
if (buf->ndim != nd) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned)buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_ZeroBuffer(buf);
return -1;
}
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (info->buf == NULL) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (!buf) {
PyErr_SetString(PyExc_ValueError,
"buf is NULL.");
goto fail;
} else if (memviewslice->memview || memviewslice->data) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
Py_FatalError(msg);
va_end(vargs);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview || (PyObject *) memview == Py_None)
return;
if (__pyx_get_slice_count(memview) < 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (first_time) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (!memview) {
return;
} else if ((PyObject *) memview == Py_None) {
memslice->memview = NULL;
return;
}
if (__pyx_get_slice_count(memview) <= 0)
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (last_time) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
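/* __Pyx_div_long: Python floor division for C longs.  C '/' truncates toward
   zero, so for operands of opposite sign with a nonzero remainder the
   quotient must be shifted down by one: -7 / 2 is -3 in C but -4 in Python,
   and ((r != 0) & ((r ^ b) < 0)) is exactly 1 in that case. */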
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_Restore(type, value, tb);
#endif
}
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
PyThreadState *tstate = PyThreadState_GET();
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(type, value, tb);
#endif
}
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
}
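/* __Pyx_Raise: the C implementation of the 'raise' statement.  The Python 2
   branch normalizes a (type, value, tb) triple; the Python 3 branch also
   instantiates exception classes, validates the traceback argument, and
   attaches an explicit __cause__ for 'raise X from Y'. */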
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
if (PyObject_IsSubclass(instance_class, type)) {
type = instance_class;
} else {
instance_class = NULL;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
#if PY_VERSION_HEX >= 0x03030000
if (cause) {
#else
if (cause && cause != Py_None) {
#endif
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = PyThreadState_GET();
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(PyObject_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
}
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(PyObject_TypeCheck(obj, type))) return 1;
}
__Pyx_RaiseArgumentTypeInvalid(name, obj, type);
return 0;
}
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
length = strlen(cstring);
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
length = stop - start;
if (unlikely(length <= 0))
return PyUnicode_FromUnicode(NULL, 0);
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
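/* Exception-state helpers: ExceptionSave/Reset/Swap manage the *handled*
   exception (what sys.exc_info() reports), as opposed to ErrFetch/ErrRestore
   above, which move the *pending* exception.  On CPython they poke the
   thread state directly; elsewhere they go through
   PyErr_GetExcInfo/PyErr_SetExcInfo. */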
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
PyThreadState *tstate = PyThreadState_GET();
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
#else
PyErr_GetExcInfo(type, value, tb);
#endif
}
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(type, value, tb);
#endif
}
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_COMPILING_IN_CPYTHON
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#else
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
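/* __Pyx_GetItemInt_Fast: o[i] for a known-integer index.  Lists and tuples
   are indexed directly through PyList_GET_ITEM/PyTuple_GET_ITEM (with
   wraparound and bounds checks compiled in as constants); other sequences go
   through their sq_item slot, and anything else falls back to boxing the
   index and calling PyObject_GetItem. */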
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (PyErr_ExceptionMatches(PyExc_OverflowError))
PyErr_Clear();
else
return NULL;
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
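/* Code-object cache: __pyx_bisect_code_objects is a plain binary search over
   entries sorted by code_line.  It returns either the index of the matching
   entry or the position at which a new entry keeps the array sorted, which
   is exactly what __pyx_insert_code_object needs for its shifting insert. */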
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = (start + end) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
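/* Traceback synthesis: to make C-level failures show up as ordinary Python
   tracebacks, __Pyx_CreateCodeObjectForTraceback builds an empty
   PyCodeObject whose filename and line number point back into the original
   source, and __Pyx_AddTraceback wraps it in a throwaway frame and hands it
   to PyTraceBack_Here.  The code objects are memoized in the cache above. */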
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
py_code = __pyx_find_code_object(c_line ? c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? c_line : py_line, py_code);
}
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = py_line;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; }
Py_DECREF(obj);
view->obj = NULL;
}
#endif
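/* __Pyx_Import: perform an import with this module's globals so relative
   imports resolve against the enclosing package.  When level is -1 on
   Python 3, the code first attempts a package-relative import (level 1) and,
   if that raises ImportError, clears it and retries absolutely (level 0). */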
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_VERSION_HEX < 0x03030000
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(1);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
#endif
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_VERSION_HEX < 0x03030000
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
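/* __pyx_typeinfo_cmp: structural equality of two dtype descriptions --
   size, type group, signedness, array shape, and (recursively) struct fields
   must all agree.  'H' (bytes/char) entries compare by size alone. */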
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (buf->strides[dim] != sizeof(void *)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (buf->strides[dim] != buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (stride < buf->itemsize) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (spec & (__Pyx_MEMVIEW_PTR)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (buf->suboffsets) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
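/* __pyx_verify_contig: check that a buffer really is C- or F-contiguous by
   rebuilding the expected strides from the shape -- itemsize in the fastest
   dimension, multiplied up from there -- and comparing.  Dimensions of
   extent <= 1 are exempt, since any stride is harmless there. */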
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1)
{
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (stride * buf->itemsize != buf->strides[i] &&
buf->shape[i] > 1) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (buf->ndim != ndim) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if ((unsigned) buf->itemsize != dtype->size) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (!__pyx_check_strides(buf, i, ndim, spec))
goto fail;
if (!__pyx_check_suboffsets(buf, i, ndim, spec))
goto fail;
}
if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value) \
{ \
func_type value = func_value; \
if (sizeof(target_type) < sizeof(func_type)) { \
if (unlikely(value != (func_type) (target_type) value)) { \
func_type zero = 0; \
if (is_unsigned && unlikely(value < zero)) \
goto raise_neg_overflow; \
else \
goto raise_overflow; \
} \
} \
return (target_type) value; \
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(int) <= sizeof(unsigned long long)) {
__PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +(((PyLongObject*)x)->ob_digit[0]));
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong(x))
} else if (sizeof(int) <= sizeof(long long)) {
__PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 3,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(int) <= sizeof(unsigned long long)) {
return PyLong_FromUnsignedLongLong((unsigned long long) value);
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(long long)) {
return PyLong_FromLongLong((long long) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(long) <= sizeof(unsigned long long)) {
return PyLong_FromUnsignedLongLong((unsigned long long) value);
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(long long)) {
return PyLong_FromLongLong((long long) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
static PyObject *__pyx_memview_get_double(const char *itemp) {
return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
}
static int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
double value = __pyx_PyFloat_AsDouble(obj);
if ((value == (double)-1) && PyErr_Occurred())
return 0;
*(double *) itemp = value;
return 1;
}
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
z = __Pyx_c_prodf(a, a);
return z;
case 3:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(z, a);
case 4:
z = __Pyx_c_prodf(a, a);
return __Pyx_c_prodf(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_absf(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
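/* Worked example (added): __Pyx_c_powf computes a^b in polar form,
 * a = r*e^(i*theta), so a^b = e^(b*(ln r + i*theta)). For a = b = i:
 * r = 1, theta = pi/2, lnr = 0, giving z_r = exp(-pi/2) and z_theta = 0,
 * i.e. i^i = exp(-pi/2) ~= 0.2079, a real number. Small integer powers
 * take the repeated-multiplication fast path at the top instead. */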
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
z = __Pyx_c_prod(a, a);
return z;
case 3:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(z, a);
case 4:
z = __Pyx_c_prod(a, a);
return __Pyx_c_prod(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_abs(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs->memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize)
return 0;
itemsize *= mvs->shape[index];
}
return 1;
}
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(char) <= sizeof(unsigned long long)) {
__PYX_VERIFY_RETURN_INT(char, unsigned long long, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +(((PyLongObject*)x)->ob_digit[0]));
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyLong_AsLong(x))
} else if (sizeof(char) <= sizeof(long long)) {
__PYX_VERIFY_RETURN_INT(char, long long, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong(x))
} else if (sizeof(long) <= sizeof(unsigned long long)) {
__PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong(x))
}
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +(((PyLongObject*)x)->ob_digit[0]));
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, -(sdigit) ((PyLongObject*)x)->ob_digit[0]);
}
#endif
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong(x))
} else if (sizeof(long) <= sizeof(long long)) {
__PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong(x))
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 3,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
#ifndef __PYX_HAVE_RT_ImportFunction
#define __PYX_HAVE_RT_ImportFunction
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
if (!d)
goto bad;
cobj = PyDict_GetItemString(d, funcname);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%.200s does not export expected C function %.200s",
PyModule_GetName(module), funcname);
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
goto bad;
}
tmp.p = PyCapsule_GetPointer(cobj, sig);
#else
{const char *desc, *s1, *s2;
desc = (const char *)PyCObject_GetDesc(cobj);
if (!desc)
goto bad;
s1 = desc; s2 = sig;
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
if (*s1 != *s2) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, desc);
goto bad;
}
tmp.p = PyCObject_AsVoidPtr(cobj);}
#endif
*f = tmp.fp;
if (!(*f))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
#else
if (__Pyx_PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (PyUnicode_IS_ASCII(o)) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
} else
#endif
#if !CYTHON_COMPILING_IN_PYPY
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return Py_INCREF(x), x;
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
if (res) {
#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b)))
return PyInt_AS_LONG(b);
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(b)) {
case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
case 0: return 0;
case 1: return ((PyLongObject*)b)->ob_digit[0];
}
#endif
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
t_initialize.c | #include <stdlib.h> /* malloc */
#include <unistd.h> /* sysconf */
#include <gpt.h>
/*
** Array (1 per thread) of linked lists of timers, and last timer in each list
*/
struct node **timers = NULL;
struct node **last = NULL;
long ticks_per_sec;
/*
** Define lock arrays depending upon the type of threading done
*/
#if ( defined THREADED_OMP )
omp_lock_t lock;
#elif ( defined THREADED_PTHREADS )
pthread_mutex_t t_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_t *threadid;
#endif
float *overhead; /* wallclock estimate of timer overhead */
int *max_indent_level; /* maximum indentation level */
int numthreads = 1; /* number of threads. 1 is for no threading */
Boolean t_initialized = false; /* whether t_initialize has been called */
Boolean wallenabled = false; /* wallclock timer stats enabled */
Boolean usrsysenabled = false; /* usr & sys timer stats enabled */
Boolean pclenabled = false; /* enable PCL library */
Boolean pcl_cyclesenabled = false; /* enable PCL cycle count */
int pcl_cyclesindex = -1; /* index for PCL cycle count */
struct PossibleEvent possible_event[] = {
{usrsys, true, "Usr Sys "},
{wall, true, "Wallclock "},
#ifdef HAVE_PCL
{pcl_start, false, " "}, /* bracket PCL entries */
{pcl_l1dcache_miss, false, "l1 D miss "},
{pcl_l2cache_miss, false, "L2 miss "},
{pcl_cycles, false, "Cycles "},
{pcl_elapsed_cycles, false, "E-Cycles "},
{pcl_fp_instr, false, "FP instr "},
{pcl_loadstore_instr, false, "L/S instr "},
{pcl_instr, false, "Instruct "},
{pcl_stall, false, "Stall "},
{pcl_end, false, " "}, /* bracket PCL entries */
#endif
};
struct Event **event = NULL;
int nevent = 0;
int npossible = sizeof (possible_event) / sizeof (struct PossibleEvent);
/*
** Needed by PCL library: otherwise unused
*/
PCL_DESCR_TYPE *descr;
int counter_list[PCL_COUNTER_MAX];
int ncounter = 0; /* number of PCL counters */
PCL_CNT_TYPE *overhead_pcl; /* overhead counter (cycles) */
/*
** t_initialize (): Initialization routine must be called from single-threaded
** region before any other timing routines may be called. The need for this
** routine could be eliminated if the timing library were not targeted at
** threaded use.
**
** return value: 0 (success) or -1 (failure)
*/
int t_initialize ()
{
int n; /* index */
int nbytes; /* number of bytes for malloc */
/* int ret; */ /* return code */
/*
** Determine number of ticks per second for time conversion, used by t_pr(), t_stamp()
*/
if ((ticks_per_sec = sysconf (_SC_CLK_TCK)) == -1)
return t_error ("t_initialize: token _SC_CLK_TCK is not defined\n");
#if ( ! defined DISABLE_TIMERS )
if (t_initialized)
return t_error ("t_initialize has already been called\n");
#if ( defined THREADED_OMP )
/*
** OMP: must call init_lock before using the lock (get_thread_num())
*/
omp_init_lock (&lock);
numthreads = omp_get_max_threads();
#elif ( defined THREADED_PTHREADS )
numthreads = MAX_THREADS;
#endif
/*
** Allocate space for global arrays
*/
nbytes = numthreads * sizeof (struct node *);
if ((timers = (struct node **) malloc (nbytes)) == 0)
return t_error ("malloc failure: %d items\n", numthreads);
if ((last = (struct node **) malloc (nbytes)) == 0)
return t_error ("malloc failure: %d items\n", numthreads);
nbytes = numthreads * sizeof (float);
if ((overhead = (float *) malloc (nbytes)) == 0)
return t_error ("malloc failure: %d items\n", numthreads);
nbytes = numthreads * sizeof (PCL_CNT_TYPE);
if ((overhead_pcl = (PCL_CNT_TYPE *) malloc (nbytes)) == 0)
return t_error ("malloc failure: %d items\n", numthreads);
nbytes = numthreads * sizeof (int);
if ((max_indent_level = (int *) malloc (nbytes)) == 0)
return t_error ("malloc failure for %d items\n", numthreads);
/*
** Initialize array values
*/
for (n = 0; n < numthreads; n++) {
timers[n] = 0;
last[n] = 0;
overhead[n] = 0.;
overhead_pcl[n] = 0;
max_indent_level[n] = 0;
}
#ifdef THREADED_PTHREADS
/*
** In the pthreads case, we must manage the threadid array which maps
** physical thread id's to logical id's
*/
nbytes = numthreads * sizeof (pthread_t);
if ((threadid = (pthread_t *) malloc (nbytes)) == 0)
return t_error ("malloc failure for %d items\n", numthreads);
/*
** Reset numthreads to 1 and define the threadid array now that initialization
** is done.
*/
threadid[0] = pthread_self ();
numthreads = 1;
#endif
if (get_thread_num () > 0)
return t_error ("t_initialize: should only be called by master thread\n");
for (n = 0; n < npossible; n++) {
if (possible_event[n].enabled) {
if (possible_event[n].name == usrsys)
usrsysenabled = true;
if (possible_event[n].name == wall)
wallenabled = true;
if ((event = realloc (event, (nevent+1) * sizeof (struct Event *))) == NULL)
return t_error ("realloc failure\n");
if ((event[nevent] = malloc (sizeof (struct Event))) == NULL)
return t_error ("realloc failure\n");
event[nevent]->name = possible_event[n].name;
strcpy (event[nevent]->string, possible_event[n].string);
#ifdef HAVE_PCL
/*
** Set up PCL stuff based on what t_setoption has provided.
*/
if (event[nevent]->name > pcl_start && event[nevent]->name < pcl_end) {
pclenabled = true;
event[nevent]->index = ncounter;
switch (possible_event[n].name) {
case pcl_l1dcache_miss:
counter_list[ncounter++] = PCL_L1DCACHE_MISS;
break;
case pcl_l2cache_miss:
counter_list[ncounter++] = PCL_L2CACHE_MISS;
break;
case pcl_cycles:
pcl_cyclesindex = ncounter;
pcl_cyclesenabled = true;
counter_list[ncounter++] = PCL_CYCLES;
break;
case pcl_elapsed_cycles:
counter_list[ncounter++] = PCL_ELAPSED_CYCLES;
break;
case pcl_fp_instr:
counter_list[ncounter++] = PCL_FP_INSTR;
break;
case pcl_loadstore_instr:
counter_list[ncounter++] = PCL_LOADSTORE_INSTR;
break;
case pcl_instr:
counter_list[ncounter++] = PCL_INSTR;
break;
case pcl_stall:
counter_list[ncounter++] = PCL_STALL;
break;
default:
break;
}
}
#endif
++nevent;
}
}
#ifdef HAVE_PCL
if (ncounter > 0) {
int thread; /* thread number */
nbytes = numthreads * sizeof (PCL_DESCR_TYPE);
if ((descr = (PCL_DESCR_TYPE *) malloc (nbytes)) == 0)
return t_error ("malloc failure: %d items\n", numthreads);
/*
** PCLinit must be called on a per-thread basis, so make the calls here in a
** loop over threads rather than once for the process. A null timer list
** flags a thread as not yet initialized.
** Also, the critical section is necessary because PCLstart appears not to be
** thread-safe.
*/
#pragma omp parallel for
for (thread = 0; thread < numthreads; thread++) {
unsigned int flags; /* mode flags needed by PCL */
int ret; /* return code */
#pragma omp critical
{
if ((ret = PCLinit (&descr[thread])) != PCL_SUCCESS)
return t_error ("unable to allocate PCL handle for thread %d. %s\n",
thread, t_pclstr (ret));
/*
** Always count user mode only
*/
flags = PCL_MODE_USER;
if ((ret = PCLquery (descr[thread], counter_list, ncounter, flags)) != PCL_SUCCESS)
return t_error ("Bad return from PCLquery thread %d: %s\n", thread, t_pclstr (ret));
if ((ret = PCLstart (descr[thread], counter_list, ncounter, flags)) != PCL_SUCCESS)
return t_error ("PCLstart failed thread=%d: %s\n", thread, t_pclstr (ret));
}
}
}
#endif
t_initialized = true;
#endif
return 0;
}
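/*
** Hedged usage sketch (added). t_start, t_stop and t_pr are assumed from the
** companion files of this timing library; they are not defined in this file.
**
** if (t_initialize () < 0) -- once, from single-threaded code
** exit (1);
** t_start ("work"); -- then bracket timed regions, per thread
** do_work ();
** t_stop ("work");
** t_pr (0); -- print accumulated stats
*/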
|
dgbtrs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgbtrs.c, normal z -> d, Fri Sep 28 17:38:04 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbtrs
*
* Solves a system of linear equations A * X = B with triangular factorization
* computed by plasma_dpbtrf or plasma_dgbtrf.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_dgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[in] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_dgbtrs
* @sa plasma_cgbtrs
* @sa plasma_dgbtrs
* @sa plasma_sgbtrs
* @sa plasma_dpbtrf
*
******************************************************************************/
int plasma_dgbtrs(plasma_enum_t trans, int n, int kl, int ku, int nrhs,
double *pAB, int ldab,
int *ipiv,
double *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (kl < 0) {
plasma_error("illegal value of kd");
return -3;
}
if (ku < 0) {
plasma_error("illegal value of ku");
return -4;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -5;
}
if (ldab < imax(1, 1+kl+ku)) {
plasma_error("illegal value of ldab");
return -7;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -10;
}
// quick return
if (imax(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_gbtrf(plasma, PlasmaRealDouble, n, kl+ku+1);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize tile matrix descriptors.
plasma_desc_t AB;
plasma_desc_t B;
int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
int tkl = (kl+nb-1)/nb; // number of tiles in lower band (not including diagonal)
int lm = (tku+tkl+1)*nb; // since we use dgetrf on panel, we pivot back within panel.
// this could fill the last tile of the panel,
// and we need extra NB space on the bottom
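// Worked example (added for exposition): with nb = 256, kl = 300, ku = 200,
// tku = (500+255)/256 = 2 and tkl = (300+255)/256 = 2, so lm = (2+2+1)*256
// = 1280 rows are allocated for the tile layout, leaving room for fill-in
// from pivoting below the band.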
int retval;
retval = plasma_desc_general_band_create(PlasmaRealDouble, PlasmaGeneral,
nb, nb, lm, n, 0, 0, n, n, kl, ku,
&AB);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_band_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&AB);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dpb2desc(pAB, ldab, AB, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
// Call the tile async function.
plasma_omp_dgbtrs(trans, AB, ipiv, B, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrix A in tile layout.
plasma_desc_destroy(&AB);
plasma_desc_destroy(&B);
// Return status.
int status = sequence.status;
return status;
}
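// Hedged usage sketch (added): a typical solve with the signature above,
// assuming pAB and ipiv hold a band LU factorization of the same matrix
// computed earlier (the factorization call itself is omitted here):
//
//     int info = plasma_dgbtrs(PlasmaNoTrans, n, kl, ku, nrhs,
//                              pAB, ldab, ipiv, pB, ldb);
//     if (info != PlasmaSuccess)
//         plasma_error("solve failed");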
/***************************************************************************//**
*
* @ingroup plasma_gbtrs
*
* Solves a system of linear equations using previously
* computed factorization.
* Non-blocking tile version of plasma_dgbtrs().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] trans
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_dgbtrf.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dgbtrs
* @sa plasma_omp_dgbtrs
* @sa plasma_omp_cgbtrs
* @sa plasma_omp_sgbtrs
* @sa plasma_omp_dgbtrf
*
******************************************************************************/
void plasma_omp_dgbtrs(plasma_enum_t trans, plasma_desc_t AB, int *ipiv, plasma_desc_t B,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((trans != PlasmaNoTrans) &&
(trans != PlasmaTrans) &&
(trans != PlasmaConjTrans)) {
plasma_error("illegal value of trans");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(AB) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (AB.n == 0 || B.n == 0)
return;
// Call the parallel functions.
if (trans == PlasmaNoTrans) {
plasma_pdtbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
PlasmaUnit,
1.0, AB,
B,
ipiv,
sequence, request);
plasma_pdtbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
PlasmaNonUnit,
1.0, AB,
B,
ipiv,
sequence, request);
}
else {
plasma_pdtbsm(PlasmaLeft, PlasmaUpper, trans,
PlasmaNonUnit,
1.0, AB,
B,
ipiv,
sequence, request);
plasma_pdtbsm(PlasmaLeft, PlasmaLower, trans,
PlasmaUnit,
1.0, AB,
B,
ipiv,
sequence, request);
}
}
|
primordial.c | /** @file primordial.c Documented primordial module.
*
* Julien Lesgourgues, 24.08.2010
*
* This module computes the primordial spectra. It can be used in different modes:
* simple parametric form, evolving inflaton perturbations, etc. So far only
* the mode corresponding to a simple analytic form in terms of amplitudes, tilts
* and runnings has been developed.
*
* The following functions can be called from other modules:
*
* -# primordial_init() at the beginning (anytime after perturb_init() and before spectra_init())
* -# primordial_spectrum_at_k() at any time for computing P(k) at any k
* -# primordial_free() at the end
*/
#include "primordial.h"
/**
* Primordial spectra for arbitrary argument and for all initial conditions.
*
* This routine evaluates the primordial spectrum at a given value of k by
* interpolating in the pre-computed table.
*
* When k is outside the pre-computed range but the spectrum has an
* analytic form, the value is computed directly. Otherwise an error is
* returned.
*
* Can be called in two modes, linear or logarithmic:
*
* - linear: takes k, returns P(k)
*
* - logarithmic: takes ln(k), return ln(P(k))
*
* One little subtlety: in case of several correlated initial conditions,
* the cross-correlation spectrum can be negative. Then, in logarithmic mode,
* the non-diagonal elements contain the cross-correlation angle \f$ P_{12}/\sqrt{P_{11} P_{22}}\f$
* (from -1 to 1) instead of \f$\ln{P_{12}}\f$
*
* This function can be called from any module at any time, provided that
* primordial_init() has been called before, and primordial_free() has not
* been called yet.
*
* @param ppm Input: pointer to primordial structure containing tabulated primordial spectrum
* @param index_md Input: index of mode (scalar, tensor, ...)
* @param mode Input: linear or logarithmic
* @param input Input: wavenumber in 1/Mpc (linear mode) or its logarithm (logarithmic mode)
* @param output Output: for each pair of initial conditions, primordial spectra P(k) in \f$Mpc^3\f$ (linear mode), or their logarithms and cross-correlation angles (logarithmic mode)
* @return the error status
*/
int primordial_spectrum_at_k(
struct primordial * ppm,
int index_md,
enum linear_or_logarithmic mode,
double input,
double * output /* array with argument output[index_ic1_ic2] (must be already allocated) */
) {
/** Summary: */
/** - define local variables */
int index_ic1,index_ic2,index_ic1_ic2;
double lnk;
int last_index;
/** - infer ln(k) from input. In linear mode, reject negative value of input k value. */
if (mode == linear) {
class_test(input<=0.,
ppm->error_message,
"k = %e",input);
lnk=log(input);
}
else {
lnk = input;
}
/** - if ln(k) is not in the interpolation range, return an error, unless
we are in the case of an analytic spectrum, for which a direct computation is possible */
if ((lnk > ppm->lnk[ppm->lnk_size-1]) || (lnk < ppm->lnk[0])) {
class_test(ppm->primordial_spec_type != analytic_Pk,
ppm->error_message,
"k=%e out of range [%e : %e]",exp(lnk),exp(ppm->lnk[0]),exp(ppm->lnk[ppm->lnk_size-1]));
/* direct computation */
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_ic1_ic2,
exp(lnk),
&(output[index_ic1_ic2])),
ppm->error_message,
ppm->error_message);
}
else {
output[index_ic1_ic2] = 0.;
}
}
}
/* if mode==linear, output is already in the correct format. Otherwise, apply necessary transformation. */
if (mode == logarithmic) {
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
output[index_ic1_ic2] = log(output[index_ic1_ic2]);
}
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
output[index_ic1_ic2] /= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]*
output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]);
}
}
}
}
}
/** - otherwise, interpolate in the pre-computed table */
else {
class_call(array_interpolate_spline(
ppm->lnk,
ppm->lnk_size,
ppm->lnpk[index_md],
ppm->ddlnpk[index_md],
ppm->ic_ic_size[index_md],
lnk,
&last_index,
output,
ppm->ic_ic_size[index_md],
ppm->error_message),
ppm->error_message,
ppm->error_message);
/* if mode==logarithmic, output is already in the correct format. Otherwise, apply necessary transformation. */
if (mode == linear) {
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
output[index_ic1_ic2]=exp(output[index_ic1_ic2]);
}
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
output[index_ic1_ic2] *= sqrt(output[index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md])]*
output[index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md])]);
}
else {
output[index_ic1_ic2] = 0.;
}
}
}
}
}
return _SUCCESS_;
}
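/* Hedged usage sketch (added): retrieving P(k) in linear mode for scalar
 * perturbations with a single (adiabatic) initial condition, so the output
 * array has one entry; `ppm` and `index_md_scalars` are assumed to come
 * from an earlier primordial_init() and perturbation setup:
 *
 *   double pk;
 *   class_call(primordial_spectrum_at_k(ppm, index_md_scalars,
 *                                       linear, 0.05, &pk),
 *              ppm->error_message, error_message);
 *   -- pk now holds P(k = 0.05/Mpc) in Mpc^3
 */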
/**
* This routine initializes the primordial structure (in particular, it computes table of primordial spectrum values)
*
* @param ppr Input: pointer to precision structure (defines method and precision for all computations)
* @param ppt Input: pointer to perturbation structure (useful for knowing k_min, k_max, etc.)
* @param ppm Output: pointer to initialized primordial structure
* @return the error status
*/
int primordial_init(
struct precision * ppr,
struct perturbs * ppt,
struct primordial * ppm
) {
/** Summary: */
/** - define local variables */
double k,k_min,k_max;
int index_md,index_ic1,index_ic2,index_ic1_ic2,index_k;
double pk,pk1,pk2;
double dlnk,lnpk_pivot,lnpk_minus,lnpk_plus,lnpk_minusminus,lnpk_plusplus;
/* uncomment if you use optional test below
(for correlated isocurvature modes) */
//double cos_delta_k;
/** - check that we really need to compute the primordial spectra */
if (ppt->has_perturbations == _FALSE_) {
ppm->lnk_size=0;
if (ppm->primordial_verbose > 0)
printf("No perturbations requested. Primordial module skipped.\n");
return _SUCCESS_;
}
else {
if (ppm->primordial_verbose > 0)
printf("Computing primordial spectra");
}
/** - get kmin and kmax from perturbation structure. Test that they make sense. */
k_min = ppt->k_min; /* first value, inferred from perturbations structure */
k_max = ppt->k_max; /* last value, inferred from perturbations structure */
class_test(k_min <= 0.,
ppm->error_message,
"k_min negative or null: stop to avoid segmentation fault");
class_test(k_max <= 0.,
ppm->error_message,
"k_max negative or null: stop to avoid segmentation fault");
class_test(ppm->k_pivot <= 0.,
ppm->error_message,
"k_pivot negative or null: stop to avoid segmentation fault");
class_test(ppr->k_per_decade_primordial <= 0.,
ppm->error_message,
"k_per_decade_primordial negative or null: stop to avoid segmentation fault");
class_test(ppr->k_per_decade_primordial <= _K_PER_DECADE_PRIMORDIAL_MIN_,
ppm->error_message,
"k_per_decade_primordial = %e: you ask for such a sparse sampling of the primordial spectrum that this is probably a mistake",
ppr->k_per_decade_primordial);
/** - allocate and fill values of \f$ \ln{k}\f$'s */
class_call(primordial_get_lnk_list(ppm,
k_min,
k_max,
ppr->k_per_decade_primordial
),
ppm->error_message,
ppm->error_message);
/** - define indices and allocate tables in primordial structure */
class_call(primordial_indices(ppt,
ppm),
ppm->error_message,
ppm->error_message);
/** - deal with case of analytic primordial spectra (with amplitudes, tilts, runnings, etc.) */
if (ppm->primordial_spec_type == analytic_Pk) {
if (ppm->primordial_verbose > 0)
printf(" (analytic spectrum)\n");
class_call_except(primordial_analytic_spectrum_init(ppt,
ppm),
ppm->error_message,
ppm->error_message,
primordial_free(ppm));
for (index_k = 0; index_k < ppm->lnk_size; index_k++) {
k=exp(ppm->lnk[index_k]);
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_ic1_ic2,
k,
&pk),
ppm->error_message,
ppm->error_message);
if (index_ic1 == index_ic2) {
/* diagonal coefficients: ln[P(k)] */
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = log(pk);
}
else {
/* non-diagonal coefficients: cosDelta(k) = P(k)_12/sqrt[P(k)_1 P(k)_2] */
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]),
k,
&pk1),
ppm->error_message,
ppm->error_message);
class_call(primordial_analytic_spectrum(ppm,
index_md,
index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]),
k,
&pk2),
ppm->error_message,
ppm->error_message);
/* either return an error if correlation is too large... */
/*
cos_delta_k = pk/sqrt(pk1*pk2);
class_test_except((cos_delta_k < -1.) || (cos_delta_k > 1.),
ppm->error_message,
primordial_free(ppm),
"correlation angle between IC's takes unphysical values");
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = cos_delta_k;
*/
/* ... or enforce definite positive correlation matrix */
if (pk > sqrt(pk1*pk2))
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 1.;
else if (pk < -sqrt(pk1*pk2))
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = -1.;
else
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = pk/sqrt(pk1*pk2);
}
}
else {
/* non-diagonal coefficients when ic's are uncorrelated */
ppm->lnpk[index_md][index_k*ppm->ic_ic_size[index_md]+index_ic1_ic2] = 0.;
}
}
}
}
}
}
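  /* Note on storage (a summary of what the loop above fills in, for
     orientation only): for each mode, ppm->lnpk[index_md] is a flattened
     2D table of size lnk_size * ic_ic_size[index_md], addressed as
     [index_k * ic_ic_size + index_ic1_ic2]. Diagonal (ic1,ic1) slots hold
     ln[P(k)]; off-diagonal (ic1,ic2) slots hold the dimensionless
     correlation coefficient P_12/sqrt(P_1 P_2) in [-1,1], or 0 when the
     initial conditions are uncorrelated. */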
/** - deal with case of inflation with given \f$V(\phi)\f$ or \f$H(\phi)\f$ */
else if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_H) || (ppm->primordial_spec_type == inflation_V_end)) {
class_call(primordial_inflation_indices(ppm),
ppm->error_message,
ppm->error_message);
if (ppm->primordial_verbose > 0)
printf(" (simulating inflation)\n");
class_call_except(primordial_inflation_solve_inflation(ppt,ppm,ppr),
ppm->error_message,
ppm->error_message,
primordial_free(ppm));
}
/** - deal with the case of external calculation of \f$ P_k \f$*/
else if (ppm->primordial_spec_type == external_Pk) {
class_test(ppt->has_scalars == _FALSE_,
ppm->error_message,
"external Pk module cannot work if you do not ask for scalar modes");
class_test(ppt->has_vectors == _TRUE_,
ppm->error_message,
"external Pk module cannot work if you ask for vector modes");
class_test(ppt->has_bi == _TRUE_ || ppt->has_cdi == _TRUE_ || ppt->has_nid == _TRUE_ || ppt->has_niv == _TRUE_,
ppm->error_message,
"external Pk module cannot work if you ask for isocurvature modes (but that could be implemented easily in the future!)");
if (ppm->primordial_verbose > 0)
printf(" (Pk calculated externally)\n");
class_call_except(primordial_external_spectrum_init(ppt,ppm),
ppm->error_message,
ppm->error_message,
primordial_free(ppm));
}
else {
class_test(0==0,
ppm->error_message,
"primordial spectrum type not recognized");
}
/** - compute second derivative of each \f$ \ln{P_k} \f$ versus lnk with spline, in view of interpolation */
for (index_md = 0; index_md < ppm->md_size; index_md++) {
class_call(array_spline_table_lines(ppm->lnk,
ppm->lnk_size,
ppm->lnpk[index_md],
ppm->ic_ic_size[index_md],
ppm->ddlnpk[index_md],
_SPLINE_EST_DERIV_,
ppm->error_message),
ppm->error_message,
ppm->error_message);
}
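  /* From this point on, the spectra can be interpolated at arbitrary k.
     A minimal usage sketch (the value of k is purely illustrative, and a
     scalar output variable is enough only when there is a single initial
     condition, as for the inflationary spectra handled below):

     double lnpk_at_k;
     class_call(primordial_spectrum_at_k(ppm,
                                         ppt->index_md_scalars,
                                         logarithmic,
                                         log(0.05),        // ln(k), with k in 1/Mpc
                                         &lnpk_at_k),
                ppm->error_message,
                ppm->error_message);
     // lnpk_at_k now holds ln[P(k)] at k = 0.05/Mpc
  */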
/** - derive spectral parameters from numerically computed spectra
(not used by the rest of the code, but useful to keep in memory for several types of investigation) */
if (ppm->primordial_spec_type != analytic_Pk) {
dlnk = log(10.)/ppr->k_per_decade_primordial;
if (ppt->has_scalars == _TRUE_) {
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot),
&lnpk_pivot),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)+dlnk,
&lnpk_plus),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)-dlnk,
&lnpk_minus),
ppm->error_message,
ppm->error_message);
ppm->A_s = exp(lnpk_pivot);
ppm->n_s = (lnpk_plus-lnpk_minus)/(2.*dlnk)+1.;
ppm->alpha_s = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2);
/** - expression for alpha_s comes from:
`ns_2 = (lnpk_plus-lnpk_pivot)/(dlnk)+1`
`ns_1 = (lnpk_pivot-lnpk_minus)/(dlnk)+1`
`alpha_s = dns/dlnk = (ns_2-ns_1)/dlnk = (lnpk_plus-lnpk_pivot-lnpk_pivot+lnpk_minus)/(dlnk)/(dlnk)`
**/
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)+2.*dlnk,
&lnpk_plusplus),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_scalars,
logarithmic,
log(ppm->k_pivot)-2.*dlnk,
&lnpk_minusminus),
ppm->error_message,
ppm->error_message);
      /** - expression for beta_s:
          `alpha_plus  = (lnpk_plusplus-2.*lnpk_plus+lnpk_pivot)/pow(dlnk,2)`
          `alpha_minus = (lnpk_pivot-2.*lnpk_minus+lnpk_minusminus)/pow(dlnk,2)`
          `ppm->beta_s = (alpha_plus-alpha_minus)/dlnk
                       = ((lnpk_plusplus-2.*lnpk_plus+lnpk_pivot)
                          - (lnpk_pivot-2.*lnpk_minus+lnpk_minusminus))/pow(dlnk,3)`
      **/
/* Simplification of the beta_s expression: */
ppm->beta_s = (lnpk_plusplus-2.*lnpk_plus+2.*lnpk_minus-lnpk_minusminus)/pow(dlnk,3);
if (ppm->primordial_verbose > 0)
printf(" -> A_s=%g n_s=%g alpha_s=%g\n",ppm->A_s,ppm->n_s,ppm->alpha_s);
}
if (ppt->has_tensors == _TRUE_) {
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_tensors,
logarithmic,
log(ppm->k_pivot),
&lnpk_pivot),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_tensors,
logarithmic,
log(ppm->k_pivot)+dlnk,
&lnpk_plus),
ppm->error_message,
ppm->error_message);
class_call(primordial_spectrum_at_k(ppm,
ppt->index_md_tensors,
logarithmic,
log(ppm->k_pivot)-dlnk,
&lnpk_minus),
ppm->error_message,
ppm->error_message);
ppm->r = exp(lnpk_pivot)/ppm->A_s;
ppm->n_t = (lnpk_plus-lnpk_minus)/(2.*dlnk);
ppm->alpha_t = (lnpk_plus-2.*lnpk_pivot+lnpk_minus)/pow(dlnk,2);
if (ppm->primordial_verbose > 0)
printf(" -> r=%g n_t=%g alpha_t=%g\n",ppm->r,ppm->n_t,ppm->alpha_t);
}
}
return _SUCCESS_;
}
/**
* This routine frees all the memory space allocated by primordial_init().
*
* To be called at the end of each run.
*
 * @param ppm Input: pointer to primordial structure (whose fields must be freed)
* @return the error status
*/
int primordial_free(
struct primordial * ppm
) {
int index_md;
if (ppm->lnk_size > 0) {
if (ppm->primordial_spec_type == analytic_Pk) {
for (index_md = 0; index_md < ppm->md_size; index_md++) {
free(ppm->amplitude[index_md]);
free(ppm->tilt[index_md]);
free(ppm->running[index_md]);
}
free(ppm->amplitude);
free(ppm->tilt);
free(ppm->running);
}
else if (ppm->primordial_spec_type == external_Pk) {
free(ppm->command);
}
for (index_md = 0; index_md < ppm->md_size; index_md++) {
free(ppm->lnpk[index_md]);
free(ppm->ddlnpk[index_md]);
free(ppm->is_non_zero[index_md]);
}
free(ppm->lnpk);
free(ppm->ddlnpk);
free(ppm->is_non_zero);
free(ppm->ic_size);
free(ppm->ic_ic_size);
free(ppm->lnk);
}
return _SUCCESS_;
}
/**
* This routine defines indices and allocates tables in the primordial structure
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_indices(
struct perturbs * ppt,
struct primordial * ppm
) {
int index_md;
ppm->md_size = ppt->md_size;
class_alloc(ppm->lnpk,ppt->md_size*sizeof(double*),ppm->error_message);
class_alloc(ppm->ddlnpk,ppt->md_size*sizeof(double*),ppm->error_message);
  class_alloc(ppm->ic_size,ppt->md_size*sizeof(int),ppm->error_message);    /* one int per mode */
  class_alloc(ppm->ic_ic_size,ppt->md_size*sizeof(int),ppm->error_message); /* one int per mode */
class_alloc(ppm->is_non_zero,ppm->md_size*sizeof(short *),ppm->error_message);
for (index_md = 0; index_md < ppt->md_size; index_md++) {
ppm->ic_size[index_md] = ppt->ic_size[index_md];
ppm->ic_ic_size[index_md] = (ppm->ic_size[index_md]*(ppm->ic_size[index_md]+1))/2;
class_alloc(ppm->lnpk[index_md],
ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->ddlnpk[index_md],
ppm->lnk_size*ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->is_non_zero[index_md],
ppm->ic_ic_size[index_md]*sizeof(short),
ppm->error_message);
}
return _SUCCESS_;
}
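/* For reference, index_symmetric_matrix(i,j,n) maps a pair of initial
   condition indices to a slot in the packed upper triangle of an n x n
   symmetric matrix, of total size n(n+1)/2 = ic_ic_size. A sketch of an
   equivalent row-major packing (for illustration only; the code relies on
   the actual ordering implemented by index_symmetric_matrix):

   int index_sym_sketch(int i, int j, int n) {
     if (i > j) { int tmp = i; i = j; j = tmp; }   // enforce i <= j
     return i*n - (i*(i-1))/2 + (j-i);             // n=3: (0,0)->0, (0,1)->1, (0,2)->2, (1,1)->3, (1,2)->4, (2,2)->5
   }
*/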
/**
* This routine allocates and fills the list of wavenumbers k
*
*
* @param ppm Input/output: pointer to primordial structure
* @param kmin Input: first value
* @param kmax Input: last value that we should encompass
* @param k_per_decade Input: number of k per decade
* @return the error status
*/
int primordial_get_lnk_list(
struct primordial * ppm,
double kmin,
double kmax,
double k_per_decade
) {
int i;
class_test((kmin <= 0.) || (kmax <= kmin),
ppm->error_message,
"inconsistent values of kmin=%e, kmax=%e",kmin,kmax);
ppm->lnk_size = (int)(log(kmax/kmin)/log(10.)*k_per_decade) + 2;
class_alloc(ppm->lnk,ppm->lnk_size*sizeof(double),ppm->error_message);
for (i=0; i<ppm->lnk_size; i++)
ppm->lnk[i]=log(kmin)+i*log(10.)/k_per_decade;
return _SUCCESS_;
}
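/* Worked example (numbers purely illustrative): with kmin=1e-4/Mpc,
   kmax=10/Mpc and k_per_decade=10, there are log10(kmax/kmin)=5 decades,
   so lnk_size = (int)(5*10)+2 = 52. The last sampled point is then
   ln(kmin)+51*ln(10)/10, i.e. k = 1e-4 * 10^5.1 ~ 12.6/Mpc, which indeed
   encompasses kmax, as guaranteed by the "+2" above. */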
/**
 * This routine interprets and stores in a condensed form the input parameters
 * in the case of simple analytic spectra with amplitudes, tilts and runnings,
 * in such a way that later on, the spectrum can be obtained by a quick call to
 * the routine primordial_analytic_spectrum()
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_analytic_spectrum_init(
struct perturbs * ppt,
struct primordial * ppm
) {
int index_md,index_ic1,index_ic2;
int index_ic1_ic2,index_ic1_ic1,index_ic2_ic2;
double one_amplitude=0.;
double one_tilt=0.;
double one_running=0.;
double one_correlation=0.;
class_alloc(ppm->amplitude,
ppm->md_size*sizeof(double *),
ppm->error_message);
class_alloc(ppm->tilt,
ppm->md_size*sizeof(double *),
ppm->error_message);
class_alloc(ppm->running,
ppm->md_size*sizeof(double *),
ppm->error_message);
for (index_md = 0; index_md < ppm->md_size; index_md++) {
class_alloc(ppm->amplitude[index_md],
ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->tilt[index_md],
ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
class_alloc(ppm->running[index_md],
ppm->ic_ic_size[index_md]*sizeof(double),
ppm->error_message);
}
for (index_md = 0; index_md < ppm->md_size; index_md++) {
/* diagonal coefficients */
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
if (_scalars_) {
if ((ppt->has_ad == _TRUE_) && (index_ic1 == ppt->index_ic_ad)) {
one_amplitude = ppm->A_s;
one_tilt = ppm->n_s;
one_running = ppm->alpha_s;
}
if ((ppt->has_bi == _TRUE_) && (index_ic1 == ppt->index_ic_bi)) {
one_amplitude = ppm->A_s*ppm->f_bi*ppm->f_bi;
one_tilt = ppm->n_bi;
one_running = ppm->alpha_bi;
}
if ((ppt->has_cdi == _TRUE_) && (index_ic1 == ppt->index_ic_cdi)) {
one_amplitude = ppm->A_s*ppm->f_cdi*ppm->f_cdi;
one_tilt = ppm->n_cdi;
one_running = ppm->alpha_cdi;
}
if ((ppt->has_nid == _TRUE_) && (index_ic1 == ppt->index_ic_nid)) {
one_amplitude = ppm->A_s*ppm->f_nid*ppm->f_nid;
one_tilt = ppm->n_nid;
one_running = ppm->alpha_nid;
}
if ((ppt->has_niv == _TRUE_) && (index_ic1 == ppt->index_ic_niv)) {
one_amplitude = ppm->A_s*ppm->f_niv*ppm->f_niv;
one_tilt = ppm->n_niv;
one_running = ppm->alpha_niv;
}
}
if (_tensors_) {
if (index_ic1 == ppt->index_ic_ten) {
one_amplitude = ppm->A_s*ppm->r;
one_tilt = ppm->n_t+1.; /* +1 to match usual definition of n_t (equivalent to n_s-1) */
one_running = ppm->alpha_t;
}
}
class_test(one_amplitude <= 0.,
ppm->error_message,
"inconsistent input for primordial amplitude: %g for index_md=%d, index_ic=%d\n",
one_amplitude,index_md,index_ic1);
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_;
ppm->amplitude[index_md][index_ic1_ic2] = one_amplitude;
ppm->tilt[index_md][index_ic1_ic2] = one_tilt;
ppm->running[index_md][index_ic1_ic2] = one_running;
}
/* non-diagonal coefficients */
for (index_ic1 = 0; index_ic1 < ppm->ic_size[index_md]; index_ic1++) {
for (index_ic2 = index_ic1+1; index_ic2 < ppm->ic_size[index_md]; index_ic2++) {
if (_scalars_) {
if ((ppt->has_ad == _TRUE_) && (ppt->has_bi == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_bi)) ||
             ((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_bi)))) {
one_correlation = ppm->c_ad_bi;
one_tilt = ppm->n_ad_bi;
one_running = ppm->alpha_ad_bi;
}
if ((ppt->has_ad == _TRUE_) && (ppt->has_cdi == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_cdi)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_cdi)))) {
one_correlation = ppm->c_ad_cdi;
one_tilt = ppm->n_ad_cdi;
one_running = ppm->alpha_ad_cdi;
}
if ((ppt->has_ad == _TRUE_) && (ppt->has_nid == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_nid)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_nid)))) {
one_correlation = ppm->c_ad_nid;
one_tilt = ppm->n_ad_nid;
one_running = ppm->alpha_ad_nid;
}
if ((ppt->has_ad == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_ad) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_ad) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_ad_niv;
one_tilt = ppm->n_ad_niv;
one_running = ppm->alpha_ad_niv;
}
if ((ppt->has_bi == _TRUE_) && (ppt->has_cdi == _TRUE_) &&
(((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_cdi)) ||
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_cdi)))) {
one_correlation = ppm->c_bi_cdi;
one_tilt = ppm->n_bi_cdi;
one_running = ppm->alpha_bi_cdi;
}
if ((ppt->has_bi == _TRUE_) && (ppt->has_nid == _TRUE_) &&
(((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_nid)) ||
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_nid)))) {
one_correlation = ppm->c_bi_nid;
one_tilt = ppm->n_bi_nid;
one_running = ppm->alpha_bi_nid;
}
if ((ppt->has_bi == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_bi) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_bi) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_bi_niv;
one_tilt = ppm->n_bi_niv;
one_running = ppm->alpha_bi_niv;
}
if ((ppt->has_cdi == _TRUE_) && (ppt->has_nid == _TRUE_) &&
(((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_nid)) ||
((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_nid)))) {
one_correlation = ppm->c_cdi_nid;
one_tilt = ppm->n_cdi_nid;
one_running = ppm->alpha_cdi_nid;
}
if ((ppt->has_cdi == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_cdi) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_cdi) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_cdi_niv;
one_tilt = ppm->n_cdi_niv;
one_running = ppm->alpha_cdi_niv;
}
if ((ppt->has_nid == _TRUE_) && (ppt->has_niv == _TRUE_) &&
(((index_ic1 == ppt->index_ic_nid) && (index_ic2 == ppt->index_ic_niv)) ||
((index_ic2 == ppt->index_ic_nid) && (index_ic1 == ppt->index_ic_niv)))) {
one_correlation = ppm->c_nid_niv;
one_tilt = ppm->n_nid_niv;
one_running = ppm->alpha_nid_niv;
}
}
class_test((one_correlation < -1) || (one_correlation > 1),
ppm->error_message,
"inconsistent input for isocurvature cross-correlation\n");
index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,ppm->ic_size[index_md]);
index_ic1_ic1 = index_symmetric_matrix(index_ic1,index_ic1,ppm->ic_size[index_md]);
index_ic2_ic2 = index_symmetric_matrix(index_ic2,index_ic2,ppm->ic_size[index_md]);
if (one_correlation == 0.) {
ppm->is_non_zero[index_md][index_ic1_ic2] = _FALSE_;
ppm->amplitude[index_md][index_ic1_ic2] = 0.;
ppm->tilt[index_md][index_ic1_ic2] = 0.;
ppm->running[index_md][index_ic1_ic2] = 0.;
}
else {
ppm->is_non_zero[index_md][index_ic1_ic2] = _TRUE_;
ppm->amplitude[index_md][index_ic1_ic2] =
sqrt(ppm->amplitude[index_md][index_ic1_ic1]*
ppm->amplitude[index_md][index_ic2_ic2])*
one_correlation;
ppm->tilt[index_md][index_ic1_ic2] =
0.5*(ppm->tilt[index_md][index_ic1_ic1]
+ppm->tilt[index_md][index_ic2_ic2])
+ one_tilt;
ppm->running[index_md][index_ic1_ic2] =
0.5*(ppm->running[index_md][index_ic1_ic1]
+ppm->running[index_md][index_ic2_ic2])
+ one_running;
}
}
}
}
return _SUCCESS_;
}
/**
* This routine returns the primordial spectrum in the simple analytic case with
* amplitudes, tilts, runnings, for each mode (scalar/tensor...),
* pair of initial conditions, and wavenumber.
*
* @param ppm Input/output: pointer to primordial structure
* @param index_md Input: index of mode (scalar, tensor, ...)
* @param index_ic1_ic2 Input: pair of initial conditions (ic1, ic2)
* @param k Input: wavenumber in same units as pivot scale, i.e. in 1/Mpc
* @param pk Output: primordial power spectrum A (k/k_pivot)^(n+...)
* @return the error status
*/
int primordial_analytic_spectrum(
struct primordial * ppm,
int index_md,
int index_ic1_ic2,
double k,
double * pk
) {
if (ppm->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {
*pk = ppm->amplitude[index_md][index_ic1_ic2]
*exp((ppm->tilt[index_md][index_ic1_ic2]-1.)*log(k/ppm->k_pivot)
+ 0.5 * ppm->running[index_md][index_ic1_ic2] * pow(log(k/ppm->k_pivot), 2.));
}
else {
*pk = 0.;
}
return _SUCCESS_;
}
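/* Equivalently, for diagonal entries the quantity computed above is

     ln P(k) = ln A + (n-1) ln(k/k_pivot) + (alpha/2) ln^2(k/k_pivot),

   i.e. the standard power law with running. Quick sanity check (values
   purely illustrative): for A=2.1e-9, n=0.96, alpha=0, the routine
   returns exactly P = 2.1e-9 at k = k_pivot, since both logarithmic
   terms vanish there. */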
/**
* This routine encodes the inflaton scalar potential
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: background inflaton field value in units of Mp
* @param V Output: inflaton potential in units of \f$ Mp^4\f$
* @param dV Output: first derivative of inflaton potential wrt the field
* @param ddV Output: second derivative of inflaton potential wrt the field
* @return the error status
*/
int primordial_inflation_potential(
struct primordial * ppm,
double phi,
double * V,
double * dV,
double * ddV
) {
double e,de,dde,mu,dmu,ddmu,l,dl,ddl,p,dp,ddp;
switch (ppm->potential) {
/* V(phi)=polynomial in phi */
case polynomial:
*V = ppm->V0+phi*ppm->V1+pow(phi,2)/2.*ppm->V2+pow(phi,3)/6.*ppm->V3+pow(phi,4)/24.*ppm->V4;
*dV = ppm->V1+phi*ppm->V2+pow(phi,2)/2.*ppm->V3+pow(phi,3)/6.*ppm->V4;
*ddV = ppm->V2+phi*ppm->V3+pow(phi,2)/2.*ppm->V4;
break;
/* V(phi) = Lambda^4(1+cos(phi/f)) = V0 (1+cos(phi/V1)) */
case natural:
*V = ppm->V0*(1.+cos(phi/ppm->V1));
*dV = -ppm->V0/ppm->V1*sin(phi/ppm->V1);
*ddV = -ppm->V0/ppm->V1/ppm->V1*cos(phi/ppm->V1);
break;
/* Higgs inflation from arXiv:1403.6078 */
case higgs_inflation:
// correspondence with 1403.6078:
// V0 = b
// V1 = ksi
// V2 = kappa
// V3 = delta_lambda
// mu = bar(mu)/M_P
// phi = -chi/M_P
e = exp(2./sqrt(6.)*sqrt(8.*_PI_)*phi);
de = 2./sqrt(6.)*sqrt(8.*_PI_)*e;
dde = 2./3. * 8.*_PI_ * e;
mu = pow(1.-e,0.5);
dmu = -0.5*de*pow(1.-e,-0.5);
ddmu = -0.5*dde*pow(1.-e,-0.5)-0.25*de*de*pow(1.-e,-1.5);
l = log(mu/ppm->V2);
dl = dmu/mu;
ddl = ddmu/mu - dl*dl;
p = 1./16. + ppm->V3/ppm->V0 + l*l;
dp = 2.*dl*l;
ddp = 2.*ddl*l+2.*dl*dl;
*V = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*p*pow(mu,4);
*dV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(dp*pow(mu,4)+4.*p*dmu*pow(mu,3));
*ddV = ppm->V0/4./pow(8.*_PI_,2)/ppm->V1/ppm->V1*(ddp*pow(mu,4)+8.*dp*dmu*pow(mu,3)+4.*p*ddmu*pow(mu,3)+12.*p*pow(dmu*mu,2));
//fprintf(stderr,"%e %e %e\n",*V,p,mu);
break;
/* code here other shapes */
default:
class_stop(ppm->error_message,"ppm->potential=%d different from all known cases",ppm->potential);
break;
}
return _SUCCESS_;
}
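/* Example of use of the polynomial case (values purely illustrative): a
   quadratic potential V(phi) = m^2 phi^2 / 2 corresponds to setting
   V0 = V1 = V3 = V4 = 0 and V2 = m^2 (phi in units of Mp, V in Mp^4).
   The routine then returns V = m^2 phi^2/2, dV = m^2 phi, ddV = m^2.
   Note, however, that primordial_inflation_check_potential() below
   enforces dV<0, so only the phi<0 branch of this potential would be
   accepted. */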
/**
* This routine encodes the function \f$ H(\phi)\f$
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: background inflaton field value in units of Mp
 * @param H     Output: Hubble parameter in units of Mp
* @param dH Output: \f$ dH / d\phi \f$
* @param ddH Output: \f$ d^2H / d\phi^2 \f$
* @param dddH Output: \f$ d^3H / d\phi^3 \f$
* @return the error status
*/
int primordial_inflation_hubble(
struct primordial * ppm,
double phi,
double * H,
double * dH,
double * ddH,
double * dddH
) {
*H = ppm->H0 + phi*ppm->H1 + pow(phi,2)/2.*ppm->H2 + pow(phi,3)/6.*ppm->H3 + pow(phi,4)/24.*ppm->H4;
*dH = ppm->H1 + phi*ppm->H2 + pow(phi,2)/2.*ppm->H3 + pow(phi,3)/6.*ppm->H4;
*ddH = ppm->H2 + phi*ppm->H3 + pow(phi,2)/2.*ppm->H4;
*dddH = ppm->H3 + phi*ppm->H4;
return _SUCCESS_;
}
/**
* This routine defines indices used by the inflation simulator
*
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_inflation_indices(
struct primordial * ppm
) {
int index_in;
index_in = 0;
/* indices for background quantities */
ppm->index_in_a = index_in;
index_in ++;
ppm->index_in_phi = index_in;
index_in ++;
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
ppm->index_in_dphi = index_in;
index_in ++;
}
/* size of background vector */
ppm->in_bg_size = index_in;
/* indices for perturbations */
ppm->index_in_ksi_re = index_in;
index_in ++;
ppm->index_in_ksi_im = index_in;
index_in ++;
ppm->index_in_dksi_re = index_in;
index_in ++;
ppm->index_in_dksi_im = index_in;
index_in ++;
ppm->index_in_ah_re = index_in;
index_in ++;
ppm->index_in_ah_im = index_in;
index_in ++;
ppm->index_in_dah_re = index_in;
index_in ++;
ppm->index_in_dah_im = index_in;
index_in ++;
/* size of perturbation vector */
ppm->in_size = index_in;
return _SUCCESS_;
}
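/* Resulting layout of the running vector y[] in the inflation_V /
   inflation_V_end case (in the inflation_H case the dphi slot is absent
   and all subsequent indices shift down by one):

     y[0]     : a      (scale factor)
     y[1]     : phi    (inflaton field)
     y[2]     : dphi   (phi' = dphi/dtau)   <- background sector ends here (in_bg_size)
     y[3..6]  : ksi_re, ksi_im, dksi_re, dksi_im   (scalar mode function)
     y[7..10] : ah_re, ah_im, dah_re, dah_im       (tensor mode function)

   giving in_size = 11 (10 for inflation_H). */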
/**
* Main routine of inflation simulator. Its goal is to check the
* background evolution before and after the pivot value
* phi=phi_pivot, and then, if this evolution is suitable, to call the
* routine primordial_inflation_spectra().
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @return the error status
*/
int primordial_inflation_solve_inflation(
struct perturbs * ppt,
struct primordial * ppm,
struct precision *ppr
) {
/** Summary: */
/** - define local variables */
double * y;
double * y_ini;
double * dy;
double a_pivot;
double a_try;
double H_pivot;
double H_try;
double phi_try;
double dphidt_pivot;
double dphidt_try;
double aH_ini,aH_end;
double k_max,k_min;
int counter;
double dH,ddH,dddH;
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(y_ini,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
  /** - if needed (i.e. in the inflation_V_end case), first find phi_pivot */
if (ppm->primordial_spec_type == inflation_V_end) {
class_call(primordial_inflation_find_phi_pivot(ppm,ppr,y,dy),
ppm->error_message,
ppm->error_message);
}
else {
ppm->phi_pivot = 0.;
}
  // uncomment these lines if, as a cross-check, you want first-order slow-roll predictions
/*
if (ppm->primordial_verbose>0) {
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
double V,dV,ddV;
class_call(primordial_inflation_check_potential(ppm,ppm->phi_pivot,&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
fprintf(stdout," -> 1st-order slow-roll prediction for A_s: %g\n",128.*_PI_/3.*pow(V,3)/pow(dV,2));
fprintf(stdout," -> 1st-order slow-roll prediction for T/S: %g\n",pow(dV/V,2)/_PI_);
fprintf(stdout," -> 1st-order slow-roll prediction for A_T: %g\n",pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2));
fprintf(stdout," -> 1st-order slow-roll prediction for n_s: %g\n",1.-6./16./_PI_*pow(dV/V,2)+2./8./_PI_*(ddV/V));
fprintf(stdout," -> 1st-order slow-roll prediction for n_t: %g\n",-2./16./_PI_*pow(dV/V,2));
}
}
*/
/** - compute H_pivot at phi_pivot */
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
    /** - check positivity and negative slope of the potential at the
        field's pivot value, and find the values of phi_dot and H there,
        assuming the slow-roll attractor solution has been reached. If no
        solution exists, the code stops there. */
if (ppm->primordial_verbose > 1)
printf(" (search attractor at pivot)\n");
class_call_except(primordial_inflation_find_attractor(ppm,
ppr,
ppm->phi_pivot,
ppr->primordial_inflation_attractor_precision_pivot,
y,
dy,
&H_pivot,
&dphidt_pivot),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
break;
case inflation_H:
    /** - check positivity and negative slope of \f$ H(\phi)\f$ at the
        field's pivot value, and get H_pivot */
class_call_except(primordial_inflation_check_hubble(ppm,
ppm->phi_pivot,
&H_pivot,
&dH,
&ddH,
&dddH),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
break;
default:
free(y);free(y_ini);free(dy);
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
/** - find a_pivot, value of scale factor when k_pivot crosses horizon while phi=phi_pivot */
a_pivot = ppm->k_pivot/H_pivot;
  /** - integrate the background solution starting from phi_pivot, until
      the largest observable wavenumber is far outside the Hubble radius
      (k_max << aH). This ensures that the inflationary model considered
      here is valid and that the primordial spectrum can be
      computed. Otherwise, if slow-roll breaks down too early, the model
      is not suitable and the run stops. */
if (ppm->primordial_verbose > 1)
printf(" (check inflation duration after phi_pivot=%e)\n",ppm->phi_pivot);
k_max = exp(ppm->lnk[ppm->lnk_size-1]);
aH_end = k_max/ppr->primordial_inflation_ratio_max;
y[ppm->index_in_a] = a_pivot;
y[ppm->index_in_phi] = ppm->phi_pivot;
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = a_pivot*dphidt_pivot;
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
aH_end,
_TRUE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
  /* we now need the opposite: to check that there is an initial
     time such that k_min << (aH)_ini. A guess is made by integrating
     backward in time. This can be done exactly for inflation_H, or
     only approximately for inflation_V (using the first-order
     approximation to the attractor inflationary solution). The
     approximation is harmless, however: later on, we compute the
     attractor solution at the initial time with high accuracy, and
     then we integrate the background equations forward in time. Hence
     the approximation made here introduces no error in the final
     result; it is just a quick way to find a reasonable initial phi
     value. In the inflation_V case, if the exact forward integration
     reveals that the guess was not good (i.e. does not correspond to
     "early enough"), we iterate over sequences of backward/forward
     integration, until a correct time is found. For potentials such
     that no solution exists (no long-enough slow-roll period before
     the pivot scale), the run stops. */
if (ppm->primordial_verbose > 1)
printf(" (check inflation duration before pivot)\n");
k_min = exp(ppm->lnk[0]);
aH_ini = k_min/ppr->primordial_inflation_ratio_min;
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
counter = 0;
y[ppm->index_in_a] = a_pivot;
y[ppm->index_in_phi] = ppm->phi_pivot;
do {
/* counter to avoid infinite loop */
counter ++;
class_test_except(counter >= ppr->primordial_inflation_phi_ini_maxit,
ppm->error_message,
free(y);free(y_ini);free(dy),
"when searching for an initial value of phi just before observable inflation takes place, could not converge after %d iterations. The potential does not allow eough inflationary e-folds before reaching the pivot scale",
counter);
/* try to find a value phi_try such that
aH=aH_ini*(ppr->primordial_inflation_aH_ini_target) (default:
aH_ini*0.9). But this is using the approximate backward
solution. So, anyway, we will check using the exact forward
solution that at this phi_try, we really have aH < aH_ini; if
this is not the case, we will iterate until a correct phi_try
is found. */
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
aH_ini*ppr->primordial_inflation_aH_ini_target,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
phi_try = y[ppm->index_in_phi];
/* in inflation_V case, find the accurate attractor solution for
phi_ini', and then the correct value of a_ini, and finally of
dphi/dtau_ini */
/* find dphi/dt_ini (unlike dphi/dtau_ini, this does not depend on normalization of a) */
class_call_except(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
/* we need to normalize a properly so that a=a_pivot when
phi=phi_pivot. To do so, we evolve starting arbitrarily from
a_ini=1, and then we rescale a_ini appropriately. */
y[ppm->index_in_a] = 1.;
y[ppm->index_in_phi] = phi_try;
y[ppm->index_in_dphi] = y[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_pivot,
_TRUE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
/* now impose the correct a_ini */
a_try = a_pivot/y[ppm->index_in_a];
/* in case another iteration will be needed, set a new starting point for the routine primordial_inflation_evolve_background(...,backward) */
y[ppm->index_in_a] = a_try;
y[ppm->index_in_phi] = phi_try;
} while (a_try*H_try > aH_ini);
y_ini[ppm->index_in_a] = a_try;
y_ini[ppm->index_in_phi] = phi_try;
y_ini[ppm->index_in_dphi] = y_ini[ppm->index_in_a]*dphidt_try; // dphi/dtau = a dphi/dt
break;
case inflation_H:
y[ppm->index_in_a] = a_pivot;
y[ppm->index_in_phi] = ppm->phi_pivot;
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
aH_ini,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
y_ini[ppm->index_in_a] = y[ppm->index_in_a];
y_ini[ppm->index_in_phi] = y[ppm->index_in_phi];
break;
default:
free(y);free(y_ini);free(dy);
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
/** - starting from this time, i.e. from y_ini[ ], we run the routine
which takes care of computing the primordial spectrum. */
if (ppm->primordial_verbose > 1)
printf(" (compute spectrum)\n");
if (ppm->behavior == numerical) {
class_call_except(primordial_inflation_spectra(ppt,
ppm,
ppr,
y_ini),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
}
else if (ppm->behavior == analytical) {
class_call_except(primordial_inflation_analytic_spectra(ppt,
ppm,
ppr,
y_ini),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
}
else {
class_stop(ppm->error_message,"Uncomprehensible value of the flag ppm->behavior=%d",ppm->behavior);
}
/** - before ending, we want to compute and store the values of \f$ \phi \f$
corresponding to k=aH for k_min and k_max */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k_min,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
ppm->phi_min=y[ppm->index_in_phi];
class_call_except(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k_max,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message,
free(y);free(y_ini);free(dy));
ppm->phi_max=y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1)
printf(" (observable power spectrum goes from %e to %e)\n",
ppm->phi_min,
ppm->phi_max);
/** - finally, we can de-allocate */
free(y);
free(y_ini);
free(dy);
return _SUCCESS_;
}
/**
 * Routine for the computation of an analytic approximation to the
 * primordial spectrum. In general, it should be used only for
 * comparison with the exact numerical computation performed by
 * primordial_inflation_spectra().
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @return the error status
*/
int primordial_inflation_analytic_spectra(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini
) {
double * y;
double * dy;
int index_k;
double k,phi_k;
double curvature,tensors;
double V,dV,ddV;
/** Summary */
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
/** - initialize the background part of the running vector */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
/** - loop over Fourier wavenumbers */
for (index_k=0; index_k < ppm->lnk_size; index_k++) {
k = exp(ppm->lnk[index_k]);
/* evolve background until k=aH is reached */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/** - read value of phi at time when k=aH */
phi_k = y[ppm->index_in_phi];
/** - get potential (and its derivatives) at this value */
class_call(primordial_inflation_check_potential(ppm,phi_k,&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
/** - calculate the analytic slow-roll formula for the spectra */
curvature = 128.*_PI_/3.*pow(V,3)/pow(dV,2);
tensors = pow(dV/V,2)/_PI_*128.*_PI_/3.*pow(V,3)/pow(dV,2);
/** - store the obtained result for curvature and tensor perturbations */
ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);
}
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
/**
* Routine with a loop over wavenumbers for the computation of the primordial
* spectrum. For each wavenumber it calls primordial_inflation_one_wavenumber()
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @return the error status
*/
int primordial_inflation_spectra(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini
) {
int index_k;
/* number of threads (always one if no openmp) */
int number_of_threads=1;
/* index of the thread (always 0 if no openmp) */
int thread=0;
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
parallel region. */
int abort;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop, tspent;
#endif
#ifdef _OPENMP
#pragma omp parallel
{
number_of_threads = omp_get_num_threads();
}
#endif
abort = _FALSE_;
#pragma omp parallel shared(ppt,ppm,ppr,abort,y_ini) private(index_k,thread,tspent,tstart,tstop) num_threads(number_of_threads)
{
#ifdef _OPENMP
thread = omp_get_thread_num();
tspent=0.;
#endif
#pragma omp for schedule (dynamic)
/* loop over Fourier wavenumbers */
for (index_k=0; index_k < ppm->lnk_size; index_k++) {
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
class_call_parallel(primordial_inflation_one_wavenumber(ppt,ppm,ppr,y_ini,index_k),
ppm->error_message,
ppm->error_message);
#ifdef _OPENMP
tstop = omp_get_wtime();
tspent += tstop-tstart;
#endif
}
#ifdef _OPENMP
if (ppm->primordial_verbose>1)
printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
__func__,tspent,thread);
#endif
} /* end of parallel zone */
if (abort == _TRUE_) return _FAILURE_;
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
/**
* Routine coordinating the computation of the primordial
* spectrum for one wavenumber. It calls primordial_inflation_one_k() to
* integrate the perturbation equations, and then it stores the result
* for the scalar/tensor spectra.
*
* @param ppt Input: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y_ini Input: initial conditions for the vector of background/perturbations, already allocated and filled
* @param index_k Input: index of wavenumber to be considered
* @return the error status
*/
int primordial_inflation_one_wavenumber(
struct perturbs * ppt,
struct primordial * ppm,
struct precision * ppr,
double * y_ini,
int index_k
) {
double k;
double curvature,tensors;
double * y;
double * dy;
k = exp(ppm->lnk[index_k]);
/** Summary */
/** - allocate vectors for background/perturbed quantities */
class_alloc(y,ppm->in_size*sizeof(double),ppm->error_message);
class_alloc(dy,ppm->in_size*sizeof(double),ppm->error_message);
/** - initialize the background part of the running vector */
y[ppm->index_in_a] = y_ini[ppm->index_in_a];
y[ppm->index_in_phi] = y_ini[ppm->index_in_phi];
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))
y[ppm->index_in_dphi] = y_ini[ppm->index_in_dphi];
/** - evolve the background until the relevant initial time for
integrating perturbations */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
k/ppr->primordial_inflation_ratio_min,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/** - evolve the background/perturbation equations from this time and
until some time after Horizon crossing */
class_call(primordial_inflation_one_k(ppm,
ppr,
k,
y,
dy,
&curvature,
&tensors),
ppm->error_message,
ppm->error_message);
free(y);
free(dy);
class_test(curvature<=0.,
ppm->error_message,
"negative curvature spectrum");
class_test(tensors<=0.,
ppm->error_message,
"negative tensor spectrum");
/** - store the obtained result for curvature and tensor perturbations */
ppm->lnpk[ppt->index_md_scalars][index_k] = log(curvature);
ppm->lnpk[ppt->index_md_tensors][index_k] = log(tensors);
/* uncomment if you want to print here the spectra for testing */
/* fprintf(stderr,"%e %e %e\n", */
/* ppm->lnk[index_k], */
/* ppm->lnpk[ppt->index_md_scalars][index_k], */
/* ppm->lnpk[ppt->index_md_tensors][index_k]); */
return _SUCCESS_;
}
/**
* Routine integrating the background plus perturbation equations for
* each wavenumber, and returning the scalar and tensor spectrum.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param k Input: Fourier wavenumber
* @param y Input: running vector of background/perturbations, already allocated and initialized
* @param dy Input: running vector of background/perturbation derivatives, already allocated
* @param curvature Output: curvature perturbation
* @param tensor Output: tensor perturbation
* @return the error status
*/
int primordial_inflation_one_k(
struct primordial * ppm,
struct precision * ppr,
double k,
double * y,
double * dy,
double * curvature,
double * tensor
) {
/** Summary: */
/** - define local variables */
double tau_start,tau_end,dtau;
double z,ksi2,ah2;
double aH;
double curvature_old;
double curvature_new;
double dlnPdN;
struct primordial_inflation_parameters_and_workspace pipaw;
struct generic_integrator_workspace gi;
/** - initialize the generic integrator (same integrator already used
in background, thermodynamics and perturbation modules) */
pipaw.ppm = ppm;
pipaw.N = ppm->in_size;
pipaw.integrate = forward;
pipaw.time = conformal;
pipaw.k = k;
class_call(initialize_generic_integrator(pipaw.N,&gi),
gi.error_message,
ppm->error_message);
/* initial conditions for the perturbations, Bunch-Davies vacuum */
y[ppm->index_in_ksi_re]=1./sqrt(2.*k);
y[ppm->index_in_ksi_im]=0.;
y[ppm->index_in_dksi_re]=0.;
y[ppm->index_in_dksi_im]=-k*y[ppm->index_in_ksi_re];
y[ppm->index_in_ah_re]=1./sqrt(2.*k);
y[ppm->index_in_ah_im]=0.;
y[ppm->index_in_dah_re]=0.;
y[ppm->index_in_dah_im]=-k*y[ppm->index_in_ah_re];
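  /* These values follow from the Bunch-Davies mode function
     ksi(tau) = e^{-ik tau}/sqrt(2k), evaluated at the (arbitrary)
     initial time tau=0: Re(ksi)=1/sqrt(2k), Im(ksi)=0, and since
     dksi/dtau = -ik ksi, Re(dksi)=0 and Im(dksi)=-k*Re(ksi). The tensor
     variable ah is normalized to the same vacuum. This is legitimate
     provided the integration starts deep inside the Hubble radius,
     which the default precision settings (k/aH = ratio_min >> 1 at the
     initial time) are meant to guarantee. */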
/** - initialize variable used for deciding when to stop the calculation (= when the curvature remains stable) */
curvature_new = _HUGE_;
/** - initialize conformal time to arbitrary value (here, only variations
of tau matter: the equations that we integrate do not depend
explicitly on time) */
tau_end = 0;
/** - compute derivative of initial vector and infer first value of adaptive time-step */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
/MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);
/** - loop over time */
do {
/* new time interval [tau_start, tau_end] over which equations will be integrated */
tau_start = tau_end;
tau_end = tau_start + dtau;
class_test(dtau/tau_start < ppr->smallest_allowed_variation,
ppm->error_message,
"integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);
/* evolve the system */
class_call(generic_integrator(primordial_inflation_derivs,
tau_start,
tau_end,
y,
&pipaw,
ppr->primordial_inflation_tol_integration,
ppr->smallest_allowed_variation,
&gi),
gi.error_message,
ppm->error_message);
/* compute derivatives at tau_end, useful to infer new time step and spectra */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
/* new time step */
dtau = ppr->primordial_inflation_pt_stepsize*2.*_PI_
/MAX(sqrt(fabs(dy[ppm->index_in_dksi_re]/y[ppm->index_in_ksi_re])),k);
/* new aH */
aH = dy[ppm->index_in_a]/y[ppm->index_in_a];
/* store previous value of curvature (at tau_start) */
curvature_old = curvature_new;
/* new curvature */
z = y[ppm->index_in_a]*dy[ppm->index_in_phi]/aH;
ksi2 = y[ppm->index_in_ksi_re]*y[ppm->index_in_ksi_re]+y[ppm->index_in_ksi_im]*y[ppm->index_in_ksi_im];
curvature_new = k*k*k/2./_PI_/_PI_*ksi2/z/z;
/* variation of curvature with time (dimensionless) */
dlnPdN = (curvature_new-curvature_old)/dtau*y[ppm->index_in_a]/dy[ppm->index_in_a]/curvature_new;
    /* stop once the mode is far outside the Hubble radius (k << aH) AND the curvature is stable */
} while ((k/aH >= ppr->primordial_inflation_ratio_max) || (fabs(dlnPdN) > ppr->primordial_inflation_tol_curvature));
/** - clean the generic integrator */
class_call(cleanup_generic_integrator(&gi),
gi.error_message,
ppm->error_message);
/** - store final value of curvature for this wavenumber */
*curvature = curvature_new;
/** - store final value of tensor perturbation for this wavenumber */
ah2 = y[ppm->index_in_ah_re]*y[ppm->index_in_ah_re]+y[ppm->index_in_ah_im]*y[ppm->index_in_ah_im];
*tensor = 32.*k*k*k/_PI_*ah2/y[ppm->index_in_a]/y[ppm->index_in_a];
//fprintf(stdout,"%g %g %g %g %g\n",k,*curvature,*tensor,*tensor/(*curvature),dlnPdN);
return _SUCCESS_;
}
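/* For reference, the normalizations used above are the standard ones in
   units where Mp = 1/sqrt(G): the curvature spectrum is
   P_R = k^3/(2 pi^2) |ksi|^2 / z^2 with z = a phi'/(aH) (the Mukhanov
   variable), and the tensor spectrum is P_h = 32 k^3/pi |ah|^2 / a^2,
   the prefactor 32/pi arising from 64 pi G = 64 pi/Mp^2 applied to
   k^3/(2 pi^2). */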
/**
* Routine searching for the inflationary attractor solution at a
* given phi_0, by iterations, with a given tolerance. If no solution
* found within tolerance, returns error message. The principle is the
 * following. The code starts integrating the background equations
 * from various values of phi, corresponding to earlier and earlier
 * values before phi_0, separated by a small arbitrary step size
 * corresponding roughly to 1 e-fold of inflation. Each time, the
 * integration starts with the initial condition \f$ d\phi/dt=-V'/3H\f$ (slow-roll
 * prediction). If the resulting value of \f$\phi'\f$ at phi_0 is stable (up to
 * the parameter "precision"), the code considers that there is an
* attractor, and stops iterating. If this process does not converge,
* it returns an error message.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param phi_0 Input: field value at which we wish to find the solution
 * @param precision Input: tolerance on output values (if too large, an attractor will always be considered found)
* @param y Input: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param H_0 Output: Hubble value at phi_0 for attractor solution
* @param dphidt_0 Output: field derivative value at phi_0 for attractor solution
* @return the error status
*/
int primordial_inflation_find_attractor(
struct primordial * ppm,
struct precision * ppr,
double phi_0,
double precision,
double * y,
double * dy,
double * H_0,
double * dphidt_0
) {
double V_0,dV_0,ddV_0;
double V=0.,dV=0.,ddV=0.;
double a;
double dphidt,dphidt_0new,dphidt_0old,phi;
int counter;
  /* we want a series of values of phi' at phi_0, obtained by
     integrating the system from earlier and earlier times. The first
     value of the series is the slow-roll prediction phi' =
     -V'/3H. The following lines compute this value and initialize relevant quantities. */
class_call(primordial_inflation_check_potential(ppm,phi_0,&V_0,&dV_0,&ddV_0),
ppm->error_message,
ppm->error_message);
dphidt_0new = -dV_0/3./sqrt((8.*_PI_/3.)*V_0);
phi = phi_0;
counter = 0;
dphidt_0old = dphidt_0new/(precision+2.); // this silly value just
// ensures that the loop
// below will be executed
// at least once.
/* loop over different values of phi, from which the background
equations are integrated until phi_0 */
while (fabs(dphidt_0new/dphidt_0old-1.) >= precision) {
counter ++;
class_test(counter >= ppr->primordial_inflation_attractor_maxit,
ppm->error_message,
"could not converge after %d iterations: there exists no attractor solution near phi=%g. Potential probably too steep in this region, or precision parameter primordial_inflation_attractor_precision=%g too small",
counter,
phi_0,
precision);
dphidt_0old = dphidt_0new;
/* take one step in phi, corresponding roughly to adding one more
e-fold of inflation */
phi=phi+dV_0/V_0/16./_PI_;
/* fix the initial phi' to the slow-roll prediction in that point,
and initialize other relevant quantities */
class_call(primordial_inflation_check_potential(ppm,phi,&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
a = 1.;
dphidt = -dV/3./sqrt((8.*_PI_/3.)*V);
y[ppm->index_in_a]=a;
y[ppm->index_in_phi]=phi;
y[ppm->index_in_dphi]=a*dphidt;
/* evolve the background equations until phi_0 is reached */
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
phi_0,
_TRUE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
/* compute phi' in phi_0, this is the new point in the series
which convergence we want to check */
dphidt_0new = y[ppm->index_in_dphi]/y[ppm->index_in_a];
}
/* if we have converged and found the attractor, we take the last
value of phi' in phi_0 to be the correct one for the attractor
solution */
*dphidt_0 = dphidt_0new;
*H_0 = sqrt((8.*_PI_/3.)*(0.5*dphidt_0new*dphidt_0new+V_0));
if (ppm->primordial_verbose > 1) {
printf(" (attractor found in phi=%g with phi'=%g, H=%g)\n",phi_0,*dphidt_0,*H_0);
}
return _SUCCESS_;
}
/**
* Routine integrating background equations only, from initial values
* stored in y, to a final value (if target = _aH_, until aH =
* aH_stop; if target = _phi_, till phi = phi_stop; if target =
* _end_inflation_, until \f$ d^2a/dt^2 = 0\f$ (here t = proper time)). In
* output, y contains the final background values. In addition, if
 * check_epsilon is true, the routine checks at each step that the
 * expansion is accelerated and that inflation holds (epsilon<1),
 * otherwise it returns an error. Thanks to the 'direction' argument, it is
* also possible to specify whether the integration should be carried
* forward or backward in time. For the inflation_H case, only a 1st
* order differential equation is involved, so the forward and
* backward case can be done exactly without problems. For the
* inflation_V case, the equation of motion is 2nd order. What the
* module will do in the backward case is to search for an approximate
* solution, corresponding to the (first-order) attractor inflationary
* solution. This approximate backward solution is used in order to
* estimate some initial times, but the approximation made here will
* never impact the final result: the module is written in such a way
* that after using this approximation, the code always computes (and
* relies on) the exact forward solution.
*
* @param ppm Input: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y Input/output: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param target Input: whether the goal is to reach a given aH or \f$ \phi \f$
* @param stop Input: the target value of either aH or \f$ \phi \f$
 * @param check_epsilon  Input: whether we should impose inflation (epsilon<1) at each step
* @param direction Input: whether we should integrate forward or backward in time
* @param time Input: definition of time (proper or conformal)
* @return the error status
*/
int primordial_inflation_evolve_background(
struct primordial * ppm,
struct precision * ppr,
double * y,
double * dy,
enum target_quantity target,
double stop,
short check_epsilon,
enum integration_direction direction,
enum time_definition time
) {
struct primordial_inflation_parameters_and_workspace pipaw;
struct generic_integrator_workspace gi;
double tau_start,tau_end,dtau=0.;
double H,dH,ddH,dddH;
double epsilon,epsilon_old;
double quantity=0.;
double V,dV,ddV;
double sign_dtau=0.;
pipaw.ppm = ppm;
pipaw.N = ppm->in_bg_size;
if ((direction == backward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
// -1 to remove the differential equation for phi', since we stick to the attractor
pipaw.N -= 1;
}
pipaw.integrate = direction;
pipaw.time = time;
switch (direction) {
case forward:
sign_dtau = 1.;
break;
case backward:
sign_dtau = -1.;
break;
}
class_call(initialize_generic_integrator(pipaw.N,&gi),
gi.error_message,
ppm->error_message);
  /* at the starting point, compute epsilon if requested */
if (check_epsilon == _TRUE_) {
class_call(primordial_inflation_get_epsilon(ppm,
y[ppm->index_in_phi],
&epsilon),
ppm->error_message,
ppm->error_message);
}
/* at starting point, compute the stepsize dtau */
tau_end = 0;
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// compute timestep (if time = conformal, dtau is the conformal time step,
// if time = proper, dtau is in fact dt, the proper time step)
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
dtau = ppr->primordial_inflation_bg_stepsize
*MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
}
else {
// minus sign for backward in time
dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
}
/* expected value of target quantity after the next step */
switch (target) {
case _aH_:
// next (approximate) value of aH after next step
// (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
// where dtau can be conformal or proper time
quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
if (time == conformal) quantity /= y[ppm->index_in_a];
break;
case _phi_:
// next (approximate) value of phi after next step
quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
break;
case _end_inflation_:
// in this case, the goal is to reach d2a/dt2 = 0 (end of accelerated expansion)
stop = 0.;
      // current value of quantity = -(d2a/dt2)/a = [-(a'/a)^2 + (3/2)(8 pi/3) phi'^2]/a^2 = [-(a'/a)^2 + 4 pi phi'^2]/a^2
quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
// check that we are in the right case
class_test(ppm->primordial_spec_type != inflation_V_end,
ppm->error_message,
"the target _end_inflation_ is only coded to work with inflation_V_end (but could be generalized if needed)");
break;
case _a_:
// next (approximate) value of a after next step
quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
break;
}
/* loop over time steps, checking that there will be no overshooting */
while (sign_dtau*(quantity - stop) < 0.) {
/* check that V(phi) or H(phi) do not take forbidden values
(negative or positive derivative) */
if ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end)) {
class_call(primordial_inflation_check_potential(ppm,
y[ppm->index_in_phi],
&V,
&dV,
&ddV),
ppm->error_message,
ppm->error_message);
}
else {
class_call(primordial_inflation_check_hubble(ppm,
y[ppm->index_in_phi],
&H,
&dH,
&ddH,
&dddH),
ppm->error_message,
ppm->error_message);
}
/* take one time step */
tau_start = tau_end;
tau_end = tau_start + dtau;
// mind the fabs(...) below (works for both forward and backward integration)
class_test(fabs(dtau/tau_start) < ppr->smallest_allowed_variation,
ppm->error_message,
"integration step: relative change in time =%e < machine precision : leads either to numerical error or infinite loop",dtau/tau_start);
class_call(generic_integrator(primordial_inflation_derivs,
tau_start,
tau_end,
y,
&pipaw,
ppr->primordial_inflation_tol_integration,
ppr->smallest_allowed_variation,
&gi),
gi.error_message,
ppm->error_message);
    /* if requested, check that epsilon is not becoming greater than one */
if (check_epsilon == _TRUE_) {
epsilon_old = epsilon;
class_call_except(primordial_inflation_get_epsilon(ppm,
y[ppm->index_in_phi],
&epsilon),
ppm->error_message,
ppm->error_message,
cleanup_generic_integrator(&gi));
class_test_except((epsilon > 1) && (epsilon_old <= 1),
ppm->error_message,
cleanup_generic_integrator(&gi),
"Inflaton evolution crosses the border from epsilon<1 to epsilon>1 at phi=%g. Inflation disrupted during the observable e-folds",
y[ppm->index_in_phi]);
}
/* recompute new value of next conformal time step */
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// compute timestep (if time = conformal, dtau is the conformal time step,
// if time = proper, dtau is in fact dt, the proper time step)
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V) || (ppm->primordial_spec_type == inflation_V_end))) {
dtau = ppr->primordial_inflation_bg_stepsize
*MIN(y[ppm->index_in_a]/dy[ppm->index_in_a],fabs(y[ppm->index_in_dphi]/dy[ppm->index_in_dphi]));
}
else {
// minus sign for backward in time
dtau = sign_dtau * ppr->primordial_inflation_bg_stepsize*y[ppm->index_in_a]/dy[ppm->index_in_a];
}
/* expected value of target quantity after the next step */
switch (target) {
case _aH_:
// next (approximate) value of aH after next step
// (a+[da/dx]*dx) H = aH (1 + [da/dx] / a dx)
// where dtau can be conformal or proper time
quantity = dy[ppm->index_in_a] * (1.+ dy[ppm->index_in_a]/y[ppm->index_in_a] * dtau);
if (time == conformal) quantity /= y[ppm->index_in_a];
break;
case _phi_:
// next (approximate) value of phi after next step
quantity = y[ppm->index_in_phi]+dy[ppm->index_in_phi]*dtau;
break;
case _end_inflation_:
      // current value of quantity = -(d2a/dt2)/a = [-(a'/a)^2 + (3/2)(8 pi/3) phi'^2]/a^2 = [-(a'/a)^2 + 4 pi phi'^2]/a^2
quantity = -pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2) + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi];
if (time == conformal) quantity /= pow(y[ppm->index_in_a],2);
break;
case _a_:
// next (approximate) value of a after next step
quantity = y[ppm->index_in_a]+dy[ppm->index_in_a]*dtau;
break;
}
}
/* won't use the integrator anymore */
class_call(cleanup_generic_integrator(&gi),
gi.error_message,
ppm->error_message);
/* Perform one last step with a simple trapezoidal integral. This
will bring exactly phi or a forward to phi_stop or a_stop, or
approximately aH forward to aH_stop, or approximately [-d2a/dt2
/a] backward to zero. */
switch (target) {
case _aH_:
switch (time){
case proper:
dtau = (stop/dy[ppm->index_in_a]-1.)/dy[ppm->index_in_a];
break;
case conformal:
dtau = (stop/(dy[ppm->index_in_a]/y[ppm->index_in_a])-1.)/(dy[ppm->index_in_a]/y[ppm->index_in_a]);
break;
}
break;
case _phi_:
dtau = (stop-y[ppm->index_in_phi])/dy[ppm->index_in_phi];
break;
case _end_inflation_:
class_call(primordial_inflation_check_potential(ppm,y[ppm->index_in_phi],&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
// We can easily pull back quantity=-d2a/dt2 /a by noticing that
// d(quantity)/dtau = 8piG phi' phi'' / a^2 (exact relation!)
// or
// d(quantity)/dtau = 8piG phi^dot (a phi^dot)^dot = 8piG phi^dot (a^dot phi^dot+ a phi^dotdot)
// By taking the step dtau = - quantity / [d(quantity)/dtau] we nearly reach quantity=0 (end of inflation), up to very good approximation
switch (time){
case proper:
dtau = -quantity/(8.*_PI_*dy[ppm->index_in_phi]*(dy[ppm->index_in_a]*dy[ppm->index_in_phi]+y[ppm->index_in_a]*dy[ppm->index_in_dphi]));
break;
case conformal:
dtau = -quantity/(8.*_PI_/y[ppm->index_in_a]/y[ppm->index_in_a]*dy[ppm->index_in_phi]*dy[ppm->index_in_dphi]);
break;
}
break;
case _a_:
dtau = (stop-y[ppm->index_in_a])/dy[ppm->index_in_a];
break;
}
y[ppm->index_in_a] += dy[ppm->index_in_a]*dtau;
y[ppm->index_in_phi] += dy[ppm->index_in_phi]*dtau;
if ((direction == forward) && ((ppm->primordial_spec_type == inflation_V)||(ppm->primordial_spec_type == inflation_V_end)))
y[ppm->index_in_dphi] += dy[ppm->index_in_dphi]*dtau;
// this last step updates also the dy[]
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
// uncomment if you want to test that the routine really reached the point at which d2a/dt2=0
/*
if (target == _end_inflation_) {
class_call(primordial_inflation_derivs(tau_end,
y,
dy,
&pipaw,
ppm->error_message),
ppm->error_message,
ppm->error_message);
aH = dy[ppm->index_in_a]/y[ppm->index_in_a];
quantity = (-aH*aH + 4*_PI_ * y[ppm->index_in_dphi] * y[ppm->index_in_dphi])/y[ppm->index_in_a]/y[ppm->index_in_a];
if (ppm->primordial_verbose>1)
printf(" (-d2a/dt2 /a = %e)\n",quantity);
}
*/
return _SUCCESS_;
}
/**
* Routine checking positivity and negative slope of the potential. The
* negative slope is an arbitrary choice. Currently the code can only
* deal with monotonic variations of the inflaton during inflation, so
* the slope has to be always negative or always positive; we took
* the first option.
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to perform the check
* @param V Output: inflaton potential in units of \f$ Mp^4\f$
* @param dV Output: first derivative of inflaton potential wrt the field
* @param ddV Output: second derivative of inflaton potential wrt the field
* @return the error status
*/
int primordial_inflation_check_potential(
struct primordial * ppm,
double phi,
double * V,
double * dV,
double * ddV
) {
class_call(primordial_inflation_potential(ppm,phi,V,dV,ddV),
ppm->error_message,
ppm->error_message);
class_test(*V <= 0.,
ppm->error_message,
"This potential becomes negative at phi=%g, before the end of observable inflation. It cannot be treated by this code",
phi);
class_test(*dV >= 0.,
ppm->error_message,
"All the code is written for the case dV/dphi<0. Here, in phi=%g, we have dV/dphi=%g. This potential cannot be treated by this code",
phi,*dV);
return _SUCCESS_;
}
/**
* Routine checking positivity and negative slope of \f$ H(\phi)\f$. The
* negative slope is an arbitrary choice. Currently the code can only
* deal with monotonic variations of the inflaton during
* inflation. And H can only decrease with time. So the slope \f$ dH/d\phi\f$
* has to be always negative or always positive... we took the first
* option: phi increases, H decreases.
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to perform the check
* @param H Output: Hubble parameter in units of Mp
* @param dH Output: \f$ dH / d\phi \f$
* @param ddH Output: \f$ d^2H / d\phi^2 \f$
* @param dddH Output: \f$ d^3H / d\phi^3 \f$
* @return the error status
*/
int primordial_inflation_check_hubble(
struct primordial * ppm,
double phi,
double * H,
double * dH,
double * ddH,
double * dddH
) {
class_call(primordial_inflation_hubble(ppm,
phi,
H,dH,ddH,dddH),
ppm->error_message,
ppm->error_message);
class_test(*H < 0.,
ppm->error_message,
"this H(phi) is not physical. H = %e",
*H);
class_test(*dH > 0.,
ppm->error_message,
"this H(phi) is not decreasing with growing phi. dH/dphi = %e",
*dH);
return _SUCCESS_;
}
/**
* Routine computing the first slow-roll parameter epsilon
*
* @param ppm Input: pointer to primordial structure
* @param phi Input: field value where to compute epsilon
* @param epsilon Output: result
* @return the error status
*/
int primordial_inflation_get_epsilon(
struct primordial * ppm,
double phi,
double * epsilon
) {
double V,dV,ddV;
double H,dH,ddH,dddH;
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
class_call(primordial_inflation_potential(ppm,
phi,
&V,&dV,&ddV),
ppm->error_message,
ppm->error_message);
*epsilon = 1./16./_PI_*pow(dV/V,2);
//*eta = 1./8./pi*(ddV/V)
break;
case inflation_H:
class_call(primordial_inflation_hubble(ppm,
phi,
&H,&dH,&ddH,&dddH),
ppm->error_message,
ppm->error_message);
*epsilon = 1./4./_PI_*pow(dH/H,2);
break;
default:
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
return _SUCCESS_;
}
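/* Illustrative sketch (not part of CLASS): for a quadratic potential
   V(phi) = m^2 phi^2 / 2 one has dV/V = 2/phi, so the inflation_V branch
   above gives epsilon = (1/16 pi) (dV/V)^2 = 1/(4 pi phi^2), and inflation
   (epsilon < 1) requires |phi| > 1/(2 sqrt(pi)) in these units. The m^2
   factor cancels in the ratio dV/V, so a hypothetical standalone check
   could read:

   static double toy_epsilon_quadratic(double phi) {
     double V  = 0.5*phi*phi;
     double dV = phi;
     return 1./16./_PI_*pow(dV/V,2);  // equals 1/(4*_PI_*phi*phi)
   }
*/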
/**
* Routine searching phi_pivot when a given amount of inflation is requested.
*
* @param ppm Input/output: pointer to primordial structure
* @param ppr Input: pointer to precision structure
* @param y Input: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @return the error status
*/
int primordial_inflation_find_phi_pivot(
struct primordial * ppm,
struct precision * ppr,
double * y,
double * dy
) {
/** Summary: */
/** - define local variables */
double epsilon,dphi;
double phi_try,H_try,dphidt_try,ratio_try=0.;
double phi_left,phi_right,phi_mid;
double phi_small_epsilon,phi_stop;
double dphidt_small_epsilon;
double H_small_epsilon;
double aH_ratio_after_small_epsilon=0.;
double a_ratio_after_small_epsilon=0.;
double target=0.;
double a_pivot,aH_pivot;
double rho_end;
double h;
double H0;
double rho_c0;
double sigma_B;
double Omega_g0;
double Omega_r0;
/** - check whether in vicinity of phi_end, inflation is still ongoing */
class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-ppr->primordial_inflation_end_dphi,&epsilon),
ppm->error_message,
ppm->error_message);
/** - case in which epsilon>1: hence we must find the value phi_stop <
phi_end where inflation ends naturally */
if (epsilon > 1.) {
// assume that inflation ends naturally
/** - --> find latest value of the field such that epsilon = primordial_inflation_small_epsilon (default: 0.1) */
/** - --> bracketing right-hand value is phi_end (but the potential will not be evaluated exactly there, only close by) */
phi_right = ppm->phi_end;
/** - --> bracketing left-hand value is found by iterating with logarithmic step until epsilon < primordial_inflation_small_epsilon */
dphi = ppr->primordial_inflation_end_dphi;
do {
dphi *= ppr->primordial_inflation_end_logstep;
class_call(primordial_inflation_get_epsilon(ppm,ppm->phi_end-dphi,&epsilon),
ppm->error_message,
ppm->error_message);
} while (epsilon > ppr->primordial_inflation_small_epsilon);
phi_left = ppm->phi_end-dphi;
/** - --> find value such that epsilon = primordial_inflation_small_epsilon by bisection */
do {
phi_mid = 0.5*(phi_left+phi_right);
class_call(primordial_inflation_get_epsilon(ppm,phi_mid,&epsilon),
ppm->error_message,
ppm->error_message);
if (epsilon < ppr->primordial_inflation_small_epsilon) phi_left=phi_mid;
else phi_right=phi_mid;
} while (fabs(epsilon-ppr->primordial_inflation_small_epsilon) > ppr->primordial_inflation_small_epsilon_tol);
/** - --> value found and stored as phi_small_epsilon */
phi_small_epsilon = phi_mid;
/** - --> find inflationary attractor in phi_small_epsilon (should exist since epsilon<<1 there) */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_small_epsilon,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_small_epsilon,
&dphidt_small_epsilon),
ppm->error_message,
ppm->error_message);
/** - --> compute amount of inflation between this phi_small_epsilon and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_small_epsilon;
y[ppm->index_in_dphi]=y[ppm->index_in_a]*dphidt_small_epsilon;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
conformal),
ppm->error_message,
ppm->error_message);
// we have used here conformal time, so aH = dy[a]/y[a]
aH_ratio_after_small_epsilon = dy[ppm->index_in_a]/y[ppm->index_in_a]/H_small_epsilon;
a_ratio_after_small_epsilon = y[ppm->index_in_a];
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
/* get the target value of ln_aH_ratio */
rho_end = 2./8./_PI_*pow(dy[ppm->index_in_a]/y[ppm->index_in_a],2);
rho_end = 8*_PI_/3.*rho_end/(_G_*_h_P_/pow(_c_,3))*pow(_Mpc_over_m_,2);
h = 0.7; // fiducial reduced Hubble parameter assumed here
H0 = h * 1.e5 / _c_; // corresponding H0 in 1/Mpc
rho_c0 = pow(H0,2); // critical density today in the same convention as rho_end above
sigma_B = 2. * pow(_PI_,5) * pow(_k_B_,4) / 15. / pow(_h_P_,3) / pow(_c_,2); // Stefan-Boltzmann constant
Omega_g0 = (4.*sigma_B/_c_*pow(2.726,4.)) / (3.*_c_*_c_*1.e10*h*h/_Mpc_over_m_/_Mpc_over_m_/8./_PI_/_G_); // photon density today for T_cmb = 2.726 K
Omega_r0 = 3.046*7./8.*pow(4./11.,4./3.)*Omega_g0; // N_eff = 3.046 massless neutrinos, scaled from photons by (7/8)(4/11)^(4/3)
target = log(H0/0.05*pow(Omega_r0,0.5)*pow(2./100.,1./12.)*pow(rho_end/rho_c0,0.25));
//fprintf(stderr,"auto: log(aH_end/aH_*)=%e\n",target);
break;
case ln_aH_ratio:
target = ppm->phi_pivot_target;
//fprintf(stderr,"fixed: log(aH_end/aH_*)=%e\n",target);
break;
case N_star:
target = ppm->phi_pivot_target;
//fprintf(stderr,"fixed: log(a_end/a_*)=%e\n",target);
break;
}
/** - --> by starting from phi_small_epsilon and integrating an approximate
solution backward in time, try to estimate roughly a value close
to phi_pivot but a bit smaller. This is done by trying to reach
an amount of inflation equal to the requested one, minus the
amount after phi_small_epsilon, and plus
primordial_inflation_extra_efolds efolds (default: two). Note
that it is not aggressive to require two extra e-folds of
inflation before the pivot, since the calculation of the spectrum
in the observable range will require even more. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_small_epsilon;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
}
/* we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
phi_try = y[ppm->index_in_phi];
/** - --> find attractor in phi_try */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message);
/** - --> check the total amount of inflation between phi_try and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
// aH_ratio (we have used here proper time, so aH = dy[a])
ratio_try = dy[ppm->index_in_a]/H_try;
break;
case N_star:
// a_ratio
ratio_try = y[ppm->index_in_a];
break;
}
class_test(log(ratio_try) < target,
ppm->error_message,
"phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
log(ratio_try),
target);
phi_stop = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1)
printf(" (inflation stops in phi_stop = %e)\n",phi_stop);
/** - --> go back to phi_try, and now find phi_pivot such that the amount
of inflation between phi_pivot and the end of inflation is
exactly the one requested. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_try*ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
}
ppm->phi_pivot = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1) {
printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);
/** - --> In verbose mode, check that phi_pivot is correct. Done by
restarting from phi_pivot and going again till the end of
inflation. */
aH_pivot = dy[ppm->index_in_a];
a_pivot = y[ppm->index_in_a];
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_end_inflation_,
0.,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
}
}
/** - case in which epsilon<1: */
else {
/** - --> find inflationary attractor at phi_end (should exist since epsilon<1 there) */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
ppm->phi_end,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_small_epsilon,
&dphidt_small_epsilon),
ppm->error_message,
ppm->error_message);
/** - --> by starting from phi_end and integrating an approximate
solution backward in time, try to estimate roughly a value close
to phi_pivot but a bit smaller. This is done by trying to reach
an amount of inflation equal to the requested one, minus the
amount after phi_small_epsilon, and plus
primordial_inflation_extra_efolds efolds (default: two). Note
that it is not aggressive to require two extra e-folds of
inflation before the pivot, since the calculation of the spectrum
in the observable range will require even more. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= ppm->phi_end;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_small_epsilon/exp(target+ppr->primordial_inflation_extra_efolds)*aH_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
1./exp(target+ppr->primordial_inflation_extra_efolds)*a_ratio_after_small_epsilon,
_TRUE_,
backward,
conformal),
ppm->error_message,
ppm->error_message);
break;
}
/** - --> we now have a value phi_try believed to be close to and slightly smaller than phi_pivot */
phi_try = y[ppm->index_in_phi];
/** - --> find attractor in phi_try */
class_call(primordial_inflation_find_attractor(ppm,
ppr,
phi_try,
ppr->primordial_inflation_attractor_precision_initial,
y,
dy,
&H_try,
&dphidt_try),
ppm->error_message,
ppm->error_message);
/** - --> check the total amount of inflation between phi_try and the end of inflation */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_end,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
// aH_ratio (we have used here proper time, so aH = dy[a])
ratio_try = dy[ppm->index_in_a]/H_try;
break;
case N_star:
// a_ratio
ratio_try = y[ppm->index_in_a];
break;
}
class_test(log(ratio_try) < target,
ppm->error_message,
"phi_try not small enough, log(aH_stop/aH_try) or log(a_stop/a_try) (depending on what you asked) is equal to %e instead of requested %e; must write here a loop to deal automatically with this situation (by decreasing phi_try iteratively), or must increase precision parameter primordial_inflation_extra_efolds",
log(ratio_try),
target);
phi_stop = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1)
printf(" (inflation stops in phi_stop = %e)\n",phi_stop);
/** - --> go back to phi_try, and now find phi_pivot such that the amount
of inflation between phi_pivot and the end of inflation is
exactly the one requested. */
y[ppm->index_in_a]=1.;
y[ppm->index_in_phi]= phi_try;
y[ppm->index_in_dphi]= dphidt_try;
switch (ppm->phi_pivot_method) {
case ln_aH_ratio_auto:
case ln_aH_ratio:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_aH_,
H_try*ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
case N_star:
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_a_,
ratio_try/exp(target),
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
break;
}
ppm->phi_pivot = y[ppm->index_in_phi];
if (ppm->primordial_verbose > 1) {
printf(" (reached phi_pivot=%e)\n",ppm->phi_pivot);
/** - --> In verbose mode, check that phi_pivot is correct. Done by
restarting from phi_pivot and going again till the end of
inflation. */
aH_pivot = dy[ppm->index_in_a];
a_pivot = y[ppm->index_in_a];
class_call(primordial_inflation_evolve_background(ppm,
ppr,
y,
dy,
_phi_,
ppm->phi_end,
_FALSE_,
forward,
proper),
ppm->error_message,
ppm->error_message);
printf(" (from phi_pivot till the end, ln(aH_2/aH_1) = %e, ln(a_2/a_1) = %e)\n",log(dy[0]/aH_pivot),log(y[0]/a_pivot));
}
}
return _SUCCESS_;
}
/**
* Routine returning derivative of system of background/perturbation
* variables. Like other routines used by the generic integrator
* (background_derivs, thermodynamics_derivs, perturb_derivs), this
* routine has a generic list of arguments, and a slightly different
* error management, with the error message returned directly in an
* ErrMsg field.
*
* @param tau Input: time (not used explicitly inside the routine, but requested by the generic integrator)
* @param y Input/output: running vector of background variables, already allocated and initialized
* @param dy Input: running vector of background derivatives, already allocated
* @param parameters_and_workspace Input: all necessary input variables apart from y
* @param error_message Output: error message
* @return the error status
*/
int primordial_inflation_derivs(
double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
struct primordial_inflation_parameters_and_workspace * ppipaw;
struct primordial * ppm;
ppipaw = parameters_and_workspace;
ppm = ppipaw->ppm;
// a2
ppipaw->a2=y[ppm->index_in_a]*y[ppm->index_in_a];
// BACKGROUND
switch (ppm->primordial_spec_type) {
case inflation_V:
case inflation_V_end:
class_call(primordial_inflation_potential(ppm,
y[ppm->index_in_phi],
&(ppipaw->V),
&(ppipaw->dV),
&(ppipaw->ddV)),
ppm->error_message,
ppm->error_message);
switch (ppipaw->integrate) {
case forward:
switch (ppipaw->time) {
case conformal:
// a H = a'/a
ppipaw->aH = sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->a2*ppipaw->V));
// 1: a
dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
// 3: dphi/dtau
dy[ppm->index_in_dphi]=-2.*ppipaw->aH*y[ppm->index_in_dphi]-ppipaw->a2*ppipaw->dV;
break;
case proper:
// a H = adot
ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*(0.5*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]+ppipaw->V));
// 1: a
dy[ppm->index_in_a]=ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]=y[ppm->index_in_dphi];
// 3: dphi/dt
dy[ppm->index_in_dphi]=-3.*ppipaw->aH/y[ppm->index_in_a]*y[ppm->index_in_dphi]-ppipaw->dV;
break;
}
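// (context: z''/z and a''/a computed below are the effective-mass terms of
// the Mukhanov-Sasaki equations integrated in the PERTURBATIONS block at the
// end of this routine, ksi'' = -(k^2 - z''/z) ksi and
// (ah)'' = -(k^2 - a''/a) (ah), both in conformal time)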
// z''/z (assumes that conformal time is requested)
ppipaw->zpp_over_z=
2*ppipaw->aH*ppipaw->aH
- ppipaw->a2*ppipaw->ddV
- 4.*_PI_*(7.*y[ppm->index_in_dphi]*y[ppm->index_in_dphi]
+4.*y[ppm->index_in_dphi]/ppipaw->aH*ppipaw->a2*ppipaw->dV)
+32.*_PI_*_PI_*pow(y[ppm->index_in_dphi],4)/pow(ppipaw->aH,2);
// a''/a (assumes that conformal time is requested)
ppipaw->app_over_a=2.*ppipaw->aH*ppipaw->aH - 4.*_PI_*y[ppm->index_in_dphi]*y[ppm->index_in_dphi];
break;
// For backward integration of the approximate slow-roll solution:
// neglect the kinetic energy of the field, phi'^2/(2a^2), w.r.t. the potential energy V;
// neglect phi'' w.r.t. 2aH phi', reducing the 2nd-order Klein-Gordon equation to an approximate 1st-order one
case backward:
switch (ppipaw->time) {
case conformal:
// a H = a'/a
ppipaw->aH = sqrt((8*_PI_/3.)*ppipaw->a2*ppipaw->V);
// 1: a
dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]= -ppipaw->a2*ppipaw->dV/3./ppipaw->aH;
break;
case proper:
// a H = da/dt
ppipaw->aH = y[ppm->index_in_a]*sqrt((8*_PI_/3.)*ppipaw->V);
// 1: a
dy[ppm->index_in_a]=ppipaw->aH;
// 2: phi
dy[ppm->index_in_phi]= -ppipaw->dV/3./ppipaw->aH*y[ppm->index_in_a];
break;
}
break;
}
break;
case inflation_H:
class_call(primordial_inflation_hubble(ppm,
y[ppm->index_in_phi],
&(ppipaw->H),
&(ppipaw->dH),
&(ppipaw->ddH),
&(ppipaw->dddH)),
ppm->error_message,
ppm->error_message);
switch (ppipaw->time) {
case conformal:
// 1: a
dy[ppm->index_in_a]=ppipaw->a2*ppipaw->H;
// 2: phi
dy[ppm->index_in_phi]=-1./4./_PI_*y[ppm->index_in_a]*ppipaw->dH;
break;
case proper:
// 1: a
dy[ppm->index_in_a]=y[ppm->index_in_a]*ppipaw->H;
// 2: phi
dy[ppm->index_in_phi]=-1./4./_PI_*ppipaw->dH;
break;
}
// z''/z (assumes that conformal time is requested)
ppipaw->zpp_over_z =
2. *ppipaw->a2*ppipaw->H*ppipaw->H
-3./4./_PI_ *ppipaw->a2*ppipaw->H*ppipaw->ddH
+1./16./_PI_/_PI_*ppipaw->a2*ppipaw->ddH*ppipaw->ddH
+1./16./_PI_/_PI_*ppipaw->a2*ppipaw->dH*ppipaw->dddH
-1./4./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->ddH/ppipaw->H
+1./2./_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH
+1./8./_PI_/_PI_ *ppipaw->a2*ppipaw->dH*ppipaw->dH*ppipaw->dH*ppipaw->dH/ppipaw->H/ppipaw->H;
// a''/a (assumes that conformal time is requested)
ppipaw->app_over_a = 2.*ppipaw->a2*ppipaw->H*ppipaw->H
-4.*_PI_*dy[ppm->index_in_phi]*dy[ppm->index_in_phi];
break;
default:
class_stop(ppm->error_message,"ppm->primordial_spec_type=%d different from possible relevant cases",ppm->primordial_spec_type);
break;
}
if (ppipaw->N <= ppm->in_bg_size) // mind the <= instead of ==, necessary because for backward integration 1 equation is removed
return _SUCCESS_;
// PERTURBATIONS
class_test(ppipaw->time == proper,
ppm->error_message,
"For inflaton perturbations, only conformal time is coded.");
// SCALARS
// 4: ksi_re
dy[ppm->index_in_ksi_re]=y[ppm->index_in_dksi_re];
// 5: ksi_im
dy[ppm->index_in_ksi_im]=y[ppm->index_in_dksi_im];
// 6: d ksi_re / dtau
dy[ppm->index_in_dksi_re]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_re];
// 7: d ksi_im / dtau
dy[ppm->index_in_dksi_im]=-(ppipaw->k*ppipaw->k-ppipaw->zpp_over_z)*y[ppm->index_in_ksi_im];
// TENSORS
// 8: ah_re
dy[ppm->index_in_ah_re]=y[ppm->index_in_dah_re];
// 9: ah_im
dy[ppm->index_in_ah_im]=y[ppm->index_in_dah_im];
// 10: d ah_re / dtau
dy[ppm->index_in_dah_re]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_re];
// 11: d ah_im / dtau
dy[ppm->index_in_dah_im]=-(ppipaw->k*ppipaw->k-ppipaw->app_over_a)*y[ppm->index_in_ah_im];
return _SUCCESS_;
}
/**
* This routine reads the primordial spectrum from an external command,
* and stores the tabulated values.
* The sampling of the k's given by the external command is preserved.
*
* Author: Jesus Torrado (torradocacho@lorentz.leidenuniv.nl)
* Date: 2013-12-20
*
* @param ppt Input/output: pointer to perturbation structure
* @param ppm Input/output: pointer to primordial structure
* @return the error status
*/
int primordial_external_spectrum_init(
struct perturbs * ppt,
struct primordial * ppm
) {
/** Summary: */
char arguments[_ARGUMENT_LENGTH_MAX_];
char line[_LINE_LENGTH_MAX_];
char command_with_arguments[2*_ARGUMENT_LENGTH_MAX_];
FILE *process;
int n_data_guess, n_data = 0;
double *k = NULL, *pks = NULL, *pkt = NULL, *tmp = NULL;
double this_k, this_pks, this_pkt;
int status;
int index_k;
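/* Note on the stream format (inferred from the sscanf calls below, an
   assumption rather than documented behavior): the external command must
   print one line per wavenumber, "k P_s(k)", or "k P_s(k) P_t(k)" when
   tensors are requested, with strictly increasing k, e.g.
     1.0e-5  2.2e-9  1.1e-10 */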
/** - Initialization */
/* Prepare the data (with some initial size) */
n_data_guess = 100;
k = (double *)malloc(n_data_guess*sizeof(double));
pks = (double *)malloc(n_data_guess*sizeof(double));
if (ppt->has_tensors == _TRUE_)
pkt = (double *)malloc(n_data_guess*sizeof(double));
/* Prepare the command */
/* If the command is just a "cat", no arguments need to be passed */
if(strncmp("cat ", ppm->command, 4) == 0) {
sprintf(arguments, " ");
}
/* otherwise pass the list of arguments */
else {
sprintf(arguments, " %g %g %g %g %g %g %g %g %g %g",
ppm->custom1, ppm->custom2, ppm->custom3, ppm->custom4, ppm->custom5,
ppm->custom6, ppm->custom7, ppm->custom8, ppm->custom9, ppm->custom10);
}
/* write the actual command in a string */
sprintf(command_with_arguments, "%s %s", ppm->command, arguments);
if (ppm->primordial_verbose > 0)
printf(" -> running: %s\n",command_with_arguments);
/** - Launch the command and retrieve the output */
/* Launch the process */
process = popen(command_with_arguments, "r");
class_test(process == NULL,
ppm->error_message,
"The program failed to set the environment for the external command. Maybe you ran out of memory.");
/* Read output and store it */
while (fgets(line, sizeof(line)-1, process) != NULL) {
if (ppt->has_tensors == _TRUE_) {
sscanf(line, "%lf %lf %lf", &this_k, &this_pks, &this_pkt);
}
else {
sscanf(line, "%lf %lf", &this_k, &this_pks);
}
/* Standard technique in C: if there are too many data, double the size of the vectors */
/* (it is faster and safer than reallocating at every new line) */
if((n_data+1) > n_data_guess) {
n_data_guess *= 2;
tmp = (double *)realloc(k, n_data_guess*sizeof(double));
class_test(tmp == NULL,
ppm->error_message,
"Error allocating memory to read the external spectrum.\n");
k = tmp;
tmp = (double *)realloc(pks, n_data_guess*sizeof(double));
class_test(tmp == NULL,
ppm->error_message,
"Error allocating memory to read the external spectrum.\n");
pks = tmp;
if (ppt->has_tensors == _TRUE_) {
tmp = (double *)realloc(pkt, n_data_guess*sizeof(double));
class_test(tmp == NULL,
ppm->error_message,
"Error allocating memory to read the external spectrum.\n");
pkt = tmp;
};
};
/* Store */
k [n_data] = this_k;
pks[n_data] = this_pks;
if (ppt->has_tensors == _TRUE_) {
pkt[n_data] = this_pkt;
}
n_data++;
/* Check ascending order of the k's */
if(n_data>1) {
class_test(k[n_data-1] <= k[n_data-2],
ppm->error_message,
"The k's are not strictly sorted in ascending order, "
"as it is required for the calculation of the splines.\n");
}
}
/* Close the process */
status = pclose(process);
class_test(status != 0,
ppm->error_message,
"The attempt to launch the external command was unsuccessful. "
"Try doing it by hand to check for errors.");
/* Test limits of the k's */
class_test(k[1] > ppt->k_min,
ppm->error_message,
"Your table for the primordial spectrum does not have "
"at least 2 points before the minimum value of k: %e . "
"The splines interpolation would not be safe.",ppt->k_min);
class_test(k[n_data-2] < ppt->k_max,
ppm->error_message,
"Your table for the primordial spectrum does not have "
"at least 2 points after the maximum value of k: %e . "
"The splines interpolation would not be safe.",ppt->k_max);
/** - Store the read results into CLASS structures */
ppm->lnk_size = n_data;
/** - Make room */
class_realloc(ppm->lnk,
ppm->lnk,
ppm->lnk_size*sizeof(double),
ppm->error_message);
class_realloc(ppm->lnpk[ppt->index_md_scalars],
ppm->lnpk[ppt->index_md_scalars],
ppm->lnk_size*sizeof(double),
ppm->error_message);
class_realloc(ppm->ddlnpk[ppt->index_md_scalars],
ppm->ddlnpk[ppt->index_md_scalars],
ppm->lnk_size*sizeof(double),
ppm->error_message);
if (ppt->has_tensors == _TRUE_) {
class_realloc(ppm->lnpk[ppt->index_md_tensors],
ppm->lnpk[ppt->index_md_tensors],
ppm->lnk_size*sizeof(double),
ppm->error_message);
class_realloc(ppm->ddlnpk[ppt->index_md_tensors],
ppm->ddlnpk[ppt->index_md_tensors],
ppm->lnk_size*sizeof(double),
ppm->error_message);
};
/** - Store values */
for (index_k=0; index_k<ppm->lnk_size; index_k++) {
ppm->lnk[index_k] = log(k[index_k]);
ppm->lnpk[ppt->index_md_scalars][index_k] = log(pks[index_k]);
if (ppt->has_tensors == _TRUE_)
ppm->lnpk[ppt->index_md_tensors][index_k] = log(pkt[index_k]);
/* DEBUG (with tensors)
fprintf(stderr,"Storing[%d(+1) of %d]: \n k = %g == %g\n pks = %g == %g\n pkt = %g == %g\n",
index_k, n_data,
ppm->lnk[index_k], log(k[index_k]),
ppm->lnpk[ppt->index_md_scalars][index_k], log(pks[index_k]),
ppm->lnpk[ppt->index_md_tensors][index_k], log(pkt[index_k]));
*/
};
/** - Release the memory used locally */
free(k);
free(pks);
if (ppt->has_tensors == _TRUE_)
free(pkt);
/** - Tell CLASS that there are scalar (and tensor) modes */
ppm->is_non_zero[ppt->index_md_scalars][ppt->index_ic_ad] = _TRUE_;
if (ppt->has_tensors == _TRUE_)
ppm->is_non_zero[ppt->index_md_tensors][ppt->index_ic_ten] = _TRUE_;
return _SUCCESS_;
}
int primordial_output_titles(struct perturbs * ppt,
struct primordial * ppm,
char titles[_MAXTITLESTRINGLENGTH_]
){
class_store_columntitle(titles,"k [1/Mpc]",_TRUE_);
class_store_columntitle(titles,"P_scalar(k)",_TRUE_);
class_store_columntitle(titles,"P_tensor(k)",ppt->has_tensors);
return _SUCCESS_;
}
int primordial_output_data(struct perturbs * ppt,
struct primordial * ppm,
int number_of_titles,
double *data){
int index_k, storeidx;
double *dataptr;
for (index_k=0; index_k<ppm->lnk_size; index_k++) {
dataptr = data + index_k*number_of_titles;
storeidx = 0;
class_store_double(dataptr, exp(ppm->lnk[index_k]), _TRUE_,storeidx);
class_store_double(dataptr, exp(ppm->lnpk[ppt->index_md_scalars][index_k]), _TRUE_,storeidx);
class_store_double(dataptr, exp(ppm->lnpk[ppt->index_md_tensors][index_k]), ppt->has_tensors,storeidx);
}
return _SUCCESS_;
}
|
spikebinning.h | #include <pybind11/numpy.h>
#include <pybind11/stl.h>
#include <stdlib.h>
#include <stdint.h>
#include <queue>
#include <memory>
#include "MergeWrapper.h"
#include "NDArrayWrapper.h"
namespace py=pybind11;
template<class T>
using ContigNPArray = py::array_t<T, py::array::c_style | py::array::forcecast>;
int64_t bin_spikes_single_cell(
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> &spike_time_buffer,
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> &bin_write_buffer,
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> &bin_cutoff_times,
int64_t offset_below_idx) {
/*
* Bins spikes for a cell whose spike times are contained in spike_time_buffer
* according to bin edge times in bin_cutoff_times. Binned spikes are written
* into bin_write_buffer
*
* All times are in units of samples
*
* @param spike_time_buffer: wrapper on an array of shape (n_spikes, )
* @param bin_write_buffer: wrapper on an array of shape (n_bins, )
* @param bin_cutoff_times: wrapper on an array of shape (n_bins + 1, )
* @param offset_below_idx: integer index into spike_time_buffer, corresponding to the
* index of any spike at or earlier than the first spike that needs
* to be binned
*/
const int64_t n_bin_edges = bin_cutoff_times.shape[0];
const int64_t n_spikes_total = spike_time_buffer.shape[0];
int64_t start, end, n_spikes_in_bin;
start = bin_cutoff_times.valueAt(0);
while (offset_below_idx < n_spikes_total &&
spike_time_buffer.valueAt(offset_below_idx) < start)
++offset_below_idx;
for (int64_t i = 0; i < n_bin_edges - 1; ++i) {
start = bin_cutoff_times.valueAt(i);
end = bin_cutoff_times.valueAt(i + 1);
n_spikes_in_bin = 0;
while (offset_below_idx < n_spikes_total && spike_time_buffer.valueAt(offset_below_idx) < end) {
++offset_below_idx;
++n_spikes_in_bin;
}
bin_write_buffer.storeTo(n_spikes_in_bin, i);
}
return offset_below_idx;
}
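/*
 * Worked example (illustrative): with spike_time_buffer = [3, 7, 8, 15],
 * bin_cutoff_times = [5, 10, 20] and offset_below_idx = 0, the initial loop
 * skips the spike at time 3, bin [5, 10) collects 2 spikes (7 and 8),
 * bin [10, 20) collects 1 spike (15), so bin_write_buffer receives [2, 1]
 * and the function returns 4 (all spikes consumed).
 */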
template<typename T>
int64_t binary_search_index(
CNDArrayWrapper::StaticNDArrayWrapper<T, 1> &spike_time_buffer,
T time) {
/*
* Intended behavior: finds the index of the first element at or
* immediately after the query time
* Returns 0 if all elements occur after the query time
*
* Assumptions: the entries in spike_time_buffer are strictly increasing,
* since spike_time_buffer must be a valid spike train, and a cell can't
* spike twice in the same sample
*
* @param spike_time_buffer: wrapper with shape (n_spikes, ), corresponding to the
* spike times of a single cell
* @param time: the time to find
*/
int64_t arr_len = spike_time_buffer.shape[0];
int64_t low = 0, high = arr_len - 1; // inclusive
int64_t idx = (arr_len >> 1);
while (low <= high) {
T value = spike_time_buffer.valueAt(idx);
if (value == time) {
return idx;
} else if (value > time) {
high = idx - 1;
idx = ((high - low) >> 1) + low;
} else {
low = idx + 1;
idx = ((high - low) >> 1) + low;
}
}
return idx + 1;
}
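/*
 * Worked example (illustrative): for spike_time_buffer = [2, 5, 9],
 * binary_search_index returns 1 for time = 5 (exact hit), 2 for time = 6
 * (first element at or after 6), 0 for time = 1, and 3 (== n_spikes) for
 * time = 10, which callers treat as "no spikes remain".
 */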
using MulticellSpikeTrain = std::map<int64_t, ContigNPArray<int64_t>>;
using MultiDataset = std::tuple <MulticellSpikeTrain, std::vector<int64_t>, ContigNPArray<int64_t>>;
void _bin_spikes_into_buffer(
MulticellSpikeTrain spikes_by_cell_id,
std::vector<int64_t> cell_order,
ContigNPArray<int64_t> trial_bin_cutoffs,
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 3> output_wrapper) {
// figure out how many trials there are, and how many bins there are
py::buffer_info bin_info = trial_bin_cutoffs.request();
int64_t *bin_time_matrix_ptr = static_cast<int64_t *> (bin_info.ptr);
const int64_t n_trials = bin_info.shape[0];
const int64_t n_bin_cutoffs = bin_info.shape[1];
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 2> bin_time_wrapper(
bin_time_matrix_ptr,
{n_trials, n_bin_cutoffs});
// figure out how many cells there are
const int64_t n_cells = cell_order.size();
using Int64_1DWrapper = CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1>;
std::map <int64_t, std::unique_ptr<Int64_1DWrapper>> spike_time_wrapper_map{};
for (int64_t cell_idx = 0; cell_idx < n_cells; ++cell_idx) {
int64_t cell_id = cell_order[cell_idx];
ContigNPArray<int64_t> spikes_for_current_cell = spikes_by_cell_id[cell_id];
py::buffer_info spike_time_info = spikes_for_current_cell.request();
std::array<int64_t, 1> spike_shape = {spike_time_info.shape[0]};
spike_time_wrapper_map[cell_id] = std::make_unique<Int64_1DWrapper>(
static_cast<int64_t *>(spike_time_info.ptr),
spike_shape);
}
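// Parallelizing over cells is race-free: each iteration reads only its own
// cell's pre-wrapped spike train and writes a disjoint (trial, cell, :)
// slice of the output buffer.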
#pragma omp parallel for
for (int64_t cell_idx = 0; cell_idx < n_cells; ++cell_idx) {
int64_t cell_id = cell_order[cell_idx];
auto spike_time_wrapper = *spike_time_wrapper_map.at(cell_id); // at() never inserts, so concurrent lookups from the parallel loop are safe
int64_t trial_idx = 0;
int64_t spike_offset = binary_search_index<int64_t>(spike_time_wrapper,
bin_time_wrapper.valueAt(trial_idx, 0));
for (; trial_idx < n_trials; ++trial_idx) {
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> output_bin_wrapper = output_wrapper.slice<1>(
CNDArrayWrapper::makeIdxSlice(trial_idx),
CNDArrayWrapper::makeIdxSlice(cell_idx),
CNDArrayWrapper::makeAllSlice());
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> bin_cutoff_wrapper = bin_time_wrapper.slice<1>(
CNDArrayWrapper::makeIdxSlice(trial_idx),
CNDArrayWrapper::makeAllSlice());
int64_t trial_bin_start = bin_time_wrapper.valueAt(trial_idx, 0);
spike_offset = binary_search_index<int64_t>(spike_time_wrapper, trial_bin_start);
spike_offset = bin_spikes_single_cell(spike_time_wrapper, output_bin_wrapper,
bin_cutoff_wrapper, spike_offset);
}
}
}
ContigNPArray<int64_t> multidataset_bin_spikes_trials_parallel(
std::vector<MultiDataset> multiple_datasets) {
int64_t n_datasets = multiple_datasets.size();
std::vector<int64_t> dataset_lengths { };
std::vector<int64_t> dataset_offsets { };
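// Assumption implicit in the code below: every dataset shares the same
// cell order and the same number of bins per trial, so both are read off
// the first dataset.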
auto first_tup = multiple_datasets[0];
auto first_trial_bin_cutoffs = std::get<2>(first_tup);
py::buffer_info first_bin_info = first_trial_bin_cutoffs.request();
int64_t n_bins = first_bin_info.shape[1] - 1;
auto first_cell_order = std::get<1>(first_tup);
int64_t n_cells = first_cell_order.size();
int64_t n_trials = 0;
for (int64_t i = 0; i < n_datasets; ++i) {
auto tup = multiple_datasets[i];
auto trial_bin_cutoffs = std::get<2>(tup);
py::buffer_info bin_info = trial_bin_cutoffs.request();
int64_t n_trials_dataset = bin_info.shape[0];
dataset_offsets.push_back(n_trials);
dataset_lengths.push_back(n_trials_dataset);
n_trials += n_trials_dataset;
}
// allocate the output binned times
auto output_buffer_info = py::buffer_info(
nullptr, /* Pointer to data (nullptr -> ask NumPy to allocate!) */
sizeof(int64_t), /* Size of one item (must be int64_t to match the format and strides below) */
py::format_descriptor<int64_t>::value, /* Buffer format */
3, /* How many dimensions? */
{n_trials, n_cells, n_bins}, /* Number of elements for each dimension */
{sizeof(int64_t) * n_bins * n_cells, sizeof(int64_t) * n_bins, sizeof(int64_t)} /* Strides for each dim */
);
ContigNPArray<int64_t> binned_output = ContigNPArray<int64_t>(output_buffer_info);
py::buffer_info output_info = binned_output.request();
int64_t *output_data_ptr = static_cast<int64_t *> (output_info.ptr);
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 3> total_output_wrapper(
output_data_ptr,
{n_trials, n_cells, n_bins});
for (int64_t i = 0; i < n_datasets; ++i) {
int64_t dataset_offset = dataset_offsets[i];
int64_t dataset_length = dataset_lengths[i];
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 3> output_wrapper = total_output_wrapper.slice<3>(
CNDArrayWrapper::makeRangeSlice(dataset_offset, dataset_offset + dataset_length),
CNDArrayWrapper::makeAllSlice(),
CNDArrayWrapper::makeAllSlice());
auto tup = multiple_datasets[i];
auto spikes_by_cell_id = std::get<0>(tup);
auto cell_order = std::get<1>(tup);
auto trial_bin_cutoffs = std::get<2>(tup);
_bin_spikes_into_buffer(spikes_by_cell_id, cell_order, trial_bin_cutoffs, output_wrapper);
}
return binned_output;
}
ContigNPArray<int64_t> bin_spikes_trials_parallel(
std::map <int64_t, ContigNPArray<int64_t>> spikes_by_cell_id,
std::vector <int64_t> cell_order,
ContigNPArray<int64_t> trial_bin_cutoffs) {
// figure out how many trials there are, and how many bins there are
py::buffer_info bin_info = trial_bin_cutoffs.request();
int64_t *bin_time_matrix_ptr = static_cast<int64_t *> (bin_info.ptr);
const int64_t n_trials = bin_info.shape[0];
const int64_t n_bin_cutoffs = bin_info.shape[1];
const int64_t n_bins = n_bin_cutoffs - 1;
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 2> bin_time_wrapper(
bin_time_matrix_ptr,
{n_trials, n_bin_cutoffs});
// figure out how many cells there are
const int64_t n_cells = cell_order.size();
// allocate the output binned times
auto output_buffer_info = py::buffer_info(
nullptr, /* Pointer to data (nullptr -> ask NumPy to allocate!) */
sizeof(int64_t), /* Size of one item */
py::format_descriptor<int64_t>::value, /* Buffer format */
3, /* How many dimensions? */
{n_trials, n_cells, n_bins}, /* Number of elements for each dimension */
{sizeof(int64_t) * n_bins * n_cells, sizeof(int64_t) * n_bins, sizeof(int64_t)} /* Strides for each dim */
);
ContigNPArray<int64_t> binned_output = ContigNPArray<int64_t>(output_buffer_info);
py::buffer_info output_info = binned_output.request();
int64_t *output_data_ptr = static_cast<int64_t *> (output_info.ptr);
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 3> output_wrapper(
output_data_ptr,
{n_trials, n_cells, n_bins});
_bin_spikes_into_buffer(spikes_by_cell_id, cell_order, trial_bin_cutoffs, output_wrapper);
return binned_output;
}
ContigNPArray<int64_t> bin_spikes_trials(
py::dict &spikes_by_cell_id,
py::list &cell_order,
ContigNPArray<int64_t> &trial_bin_cutoffs) {
// figure out how many trials there are, and how many bins there are
py::buffer_info bin_info = trial_bin_cutoffs.request();
int64_t *bin_time_matrix_ptr = static_cast<int64_t *> (bin_info.ptr);
const int64_t n_trials = bin_info.shape[0];
const int64_t n_bin_cutoffs = bin_info.shape[1];
const int64_t n_bins = n_bin_cutoffs - 1;
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 2> bin_time_wrapper(
bin_time_matrix_ptr,
{n_trials, n_bin_cutoffs});
// figure out how many cells there are
const int64_t n_cells = cell_order.size();
// allocate the output binned times
auto output_buffer_info = py::buffer_info(
nullptr, /* Pointer to data (nullptr -> ask NumPy to allocate!) */
sizeof(int64_t), /* Size of one item */
py::format_descriptor<int64_t>::value, /* Buffer format */
3, /* How many dimensions? */
{n_trials, n_cells, n_bins}, /* Number of elements for each dimension */
{sizeof(int64_t) * n_bins * n_cells, sizeof(int64_t) * n_bins, sizeof(int64_t)} /* Strides for each dim */
);
ContigNPArray<int64_t> binned_output = ContigNPArray<int64_t>(output_buffer_info);
py::buffer_info output_info = binned_output.request();
int64_t *output_data_ptr = static_cast<int64_t *> (output_info.ptr);
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 3> output_wrapper(
output_data_ptr,
{n_trials, n_cells, n_bins});
/*
* Algorithm: loop over each cell
* For each trial, bin the spikes
*/
for (int64_t cell_idx = 0; cell_idx < n_cells; ++cell_idx) {
py::object cell_id_pykey = cell_order[cell_idx];
ContigNPArray<int64_t> spikes_for_current_cell = py::cast<ContigNPArray<int64_t >>(
spikes_by_cell_id[cell_id_pykey]);
py::buffer_info spike_time_info = spikes_for_current_cell.request();
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> spike_time_wrapper(
static_cast<int64_t *>(spike_time_info.ptr),
{spike_time_info.shape[0]});
int64_t trial_idx = 0;
int64_t spike_offset = binary_search_index<int64_t>(spike_time_wrapper,
bin_time_wrapper.valueAt(trial_idx, 0));
for (; trial_idx < n_trials; ++trial_idx) {
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> output_bin_wrapper = output_wrapper.slice<1>(
CNDArrayWrapper::makeIdxSlice(trial_idx),
CNDArrayWrapper::makeIdxSlice(cell_idx),
CNDArrayWrapper::makeAllSlice());
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> bin_cutoff_wrapper = bin_time_wrapper.slice<1>(
CNDArrayWrapper::makeIdxSlice(trial_idx),
CNDArrayWrapper::makeAllSlice());
int64_t trial_bin_start = bin_time_wrapper.valueAt(trial_idx, 0);
spike_offset = binary_search_index<int64_t>(spike_time_wrapper, trial_bin_start);
spike_offset = bin_spikes_single_cell(spike_time_wrapper, output_bin_wrapper,
bin_cutoff_wrapper, spike_offset);
}
}
return binned_output;
}
ContigNPArray<int64_t> bin_spikes_movie(
py::dict &spikes_by_cell_id,
py::list &cell_order,
ContigNPArray<int64_t> &movie_bin_cutoffs) {
// figure out how many how many bins there are
py::buffer_info bin_info = movie_bin_cutoffs.request();
int64_t *bin_time_matrix_ptr = static_cast<int64_t *> (bin_info.ptr);
const int64_t n_bin_cutoffs = bin_info.shape[0];
const int64_t n_bins = n_bin_cutoffs - 1;
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> bin_time_wrapper(
bin_time_matrix_ptr,
{n_bin_cutoffs,});
// figure out how many cells there are
const int64_t n_cells = cell_order.size();
// allocate the output binned times
auto output_buffer_info = py::buffer_info(
nullptr, /* Pointer to data (nullptr -> ask NumPy to allocate!) */
sizeof(int64_t), /* Size of one item */
py::format_descriptor<int64_t>::value, /* Buffer format */
2, /* How many dimensions? */
{n_cells, n_bins}, /* Number of elements for each dimension */
{sizeof(int64_t) * n_bins, sizeof(int64_t)} /* Strides for each dim */
);
ContigNPArray<int64_t> binned_output = ContigNPArray<int64_t>(output_buffer_info);
py::buffer_info output_info = binned_output.request();
int64_t *output_data_ptr = static_cast<int64_t *> (output_info.ptr);
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 2> output_wrapper(
output_data_ptr,
{n_cells, n_bins});
// loop over the cells
// within each loop bin spikes for the corresponding cell
for (int64_t cell_idx = 0; cell_idx < n_cells; ++cell_idx) {
py::object cell_id_pykey = cell_order[cell_idx];
ContigNPArray<int64_t> spikes_for_current_cell = py::cast<ContigNPArray<int64_t >>(
spikes_by_cell_id[cell_id_pykey]);
py::buffer_info spike_time_info = spikes_for_current_cell.request();
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> spike_time_wrapper(
static_cast<int64_t *>(spike_time_info.ptr),
{spike_time_info.shape[0],});
CNDArrayWrapper::StaticNDArrayWrapper<int64_t, 1> write_wrapper = output_wrapper.slice<1>(
CNDArrayWrapper::makeIdxSlice(cell_idx),
CNDArrayWrapper::makeAllSlice());
bin_spikes_single_cell(spike_time_wrapper, write_wrapper, bin_time_wrapper, 0);
}
return binned_output;
}
template<class T>
ContigNPArray<T> merge_multiple_sorted_array(
py::list &list_of_spike_trains) {
/*
* Merges spike trains of oversplits
* Uses classic min-heap algorithm to merge N sorted spike trains
*
* @param list_of_spike_trains
*/
/*
* Implementation note: std::priority_queue is a max heap
*
* We want a min heap because we want to merge the spike times
* in increasing order, so the priority in MergeWrapper is set up
* to be the negative of the first unread spike time
*/
std::priority_queue < MergeWrapper < T >, std::vector < MergeWrapper < T >>, ComparePriority < T >> priorityQueue; // element type must be MergeWrapper<T>, matching the container's value_type
int64_t total_size = 0;
for (auto item : list_of_spike_trains) {
// convert/cast item
ContigNPArray<T> spike_train = py::cast<ContigNPArray<T >>(item);
py::buffer_info array_info = spike_train.request();
T *base_ptr = static_cast<T *> (array_info.ptr);
int64_t current_size = array_info.shape[0];
priorityQueue.push(MergeWrapper<T>(base_ptr, current_size));
total_size = total_size + current_size;
}
// allocate the output binned times
auto merged_buffer_info = py::buffer_info(
nullptr, /* Pointer to data (nullptr -> ask NumPy to allocate!) */
sizeof(T), /* Size of one item */
py::format_descriptor<T>::value, /* Buffer format */
1, /* How many dimensions? */
{total_size}, /* Number of elements for each dimension */
{sizeof(T)} /* Strides for each dim */
);
ContigNPArray<T> merged_output = ContigNPArray<T>(merged_buffer_info);
py::buffer_info output_info = merged_output.request();
T *output_base_ptr = static_cast<T *>(output_info.ptr);
int64_t write_offset = 0;
while (!priorityQueue.empty()) {
MergeWrapper <T> min_element = priorityQueue.top();
priorityQueue.pop();
T current_val = min_element.getCurrent();
*(output_base_ptr + write_offset) = current_val;
++write_offset;
min_element.increment();
if (!min_element.atEnd()) {
priorityQueue.push(min_element);
}
}
return merged_output;
}
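/*
 * Illustrative behavior: merging the sorted trains [1, 4, 7] and [2, 3, 9]
 * pops the heap six times and yields [1, 2, 3, 4, 7, 9]. Each pop/push costs
 * O(log N) in the number of trains, so merging M total spikes across N
 * trains costs O(M log N).
 */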
|
task_wait2.c | /* input: result
* Based on A.13.4c, p182 of OMP 3.0 spec.
* Liao, 9/15/2008
*/
#include <stdio.h>
#include <assert.h>
unsigned long int input = 40;
unsigned long int fib(unsigned long int n) {
unsigned long int i, j;
if (n<2)
return n;
else
{
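/* i and j must be shared: each child task writes its result back into the
   parent's stack variables, and the taskwait below guarantees both writes
   have completed before i+j is read */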
#pragma omp task shared(i)
i=fib(n-1);
#pragma omp task shared(j)
j=fib(n-2);
#pragma omp taskwait
return i+j;
}
}
int main()
{
unsigned long int result = 0;
#pragma omp parallel
{
#pragma omp single
{
result = fib(input);
}
}
printf("Fibonacci number for %lu is:%lu\n",input, result);
assert(result == 102334155);
return 0;
}
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
/*
Typedef declaration.
*/
struct _MatrixInfo
{
CacheType
type;
size_t
columns,
rows,
stride;
MagickSizeType
length;
MagickBooleanType
mapped,
synchronize;
char
path[MagickPathExtent];
int
file;
void
*elements;
SemaphoreInfo
*semaphore;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the MatrixInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
static void MatrixSignalHandler(int status)
{
ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
static inline MagickOffsetType WriteMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PWRITE)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
static MagickBooleanType SetMatrixExtent(
MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
MagickOffsetType
count,
extent,
offset;
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
return(MagickTrue);
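/* Grow the file by writing a single byte at the last requested offset: on
   POSIX systems one such write extends the file (possibly sparsely) to
   `length' bytes, while posix_fallocate() below additionally reserves real
   blocks when synchronized I/O was requested. */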
extent=(MagickOffsetType) length-1;
count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (matrix_info->synchronize != MagickFalse)
(void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
(void) signal(SIGBUS,MatrixSignalHandler);
#endif
return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
const size_t rows,const size_t stride,ExceptionInfo *exception)
{
char
*synchronize;
MagickBooleanType
status;
MatrixInfo
*matrix_info;
matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
if (matrix_info == (MatrixInfo *) NULL)
return((MatrixInfo *) NULL);
(void) memset(matrix_info,0,sizeof(*matrix_info));
matrix_info->signature=MagickCoreSignature;
matrix_info->columns=columns;
matrix_info->rows=rows;
matrix_info->stride=stride;
matrix_info->semaphore=AcquireSemaphoreInfo();
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
matrix_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
matrix_info->length=(MagickSizeType) columns*rows*stride;
if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=MemoryCache;
status=AcquireMagickResource(AreaResource,matrix_info->length);
if ((status != MagickFalse) &&
(matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
{
status=AcquireMagickResource(MemoryResource,matrix_info->length);
if (status != MagickFalse)
{
matrix_info->mapped=MagickFalse;
matrix_info->elements=AcquireMagickMemory((size_t)
matrix_info->length);
if (matrix_info->elements == NULL)
{
matrix_info->mapped=MagickTrue;
matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
matrix_info->length);
}
if (matrix_info->elements == (unsigned short *) NULL)
RelinquishMagickResource(MemoryResource,matrix_info->length);
}
}
matrix_info->file=(-1);
if (matrix_info->elements == (unsigned short *) NULL)
{
status=AcquireMagickResource(DiskResource,matrix_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'","matrix cache");
return(DestroyMatrixInfo(matrix_info));
}
matrix_info->type=DiskCache;
matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
if (matrix_info->file == -1)
return(DestroyMatrixInfo(matrix_info));
status=AcquireMagickResource(MapResource,matrix_info->length);
if (status != MagickFalse)
{
status=SetMatrixExtent(matrix_info,matrix_info->length);
if (status != MagickFalse)
matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
(size_t) matrix_info->length);
if (matrix_info->elements != NULL)
matrix_info->type=MapCache;
else
RelinquishMagickResource(MapResource,matrix_info->length);
}
}
return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This is used to generate the two-dimensional matrix, and the vectors
% required for the GaussJordanElimination() method below, solving some
% system of simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number of pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
const size_t size)
{
double
**matrix;
register ssize_t
i,
j;
matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
if (matrix == (double **) NULL)
return((double **) NULL);
for (i=0; i < (ssize_t) number_rows; i++)
{
matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i]));
if (matrix[i] == (double *) NULL)
{
for (j=0; j < i; j++)
matrix[j]=(double *) RelinquishMagickMemory(matrix[j]);
matrix=(double **) RelinquishMagickMemory(matrix);
return((double **) NULL);
}
for (j=0; j < (ssize_t) size; j++)
matrix[i][j]=0.0;
}
return(matrix);
}
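/*
  Illustrative usage (a sketch; it assumes the companion
  RelinquishMagickMatrix() method of this module for deallocation):

    double **matrix = AcquireMagickMatrix(3UL,3UL);
    if (matrix != (double **) NULL)
      {
        matrix[1][2]=4.0;   (each row is an independent allocation)
        matrix=RelinquishMagickMatrix(matrix,3UL);
      }
*/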
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyMatrixInfo method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
LockSemaphoreInfo(matrix_info->semaphore);
switch (matrix_info->type)
{
case MemoryCache:
{
if (matrix_info->mapped == MagickFalse)
matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
else
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=(unsigned short *) NULL;
}
RelinquishMagickResource(MemoryResource,matrix_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
matrix_info->elements=NULL;
RelinquishMagickResource(MapResource,matrix_info->length);
}
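/* intentional fall-through: a map cache is file-backed, so the DiskCache
   cleanup below (close and remove the backing file, release the disk
   resource) must run as well */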
case DiskCache:
{
if (matrix_info->file != -1)
(void) close(matrix_info->file);
(void) RelinquishUniqueFileResource(matrix_info->path);
RelinquishMagickResource(DiskResource,matrix_info->length);
break;
}
default:
break;
}
UnlockSemaphoreInfo(matrix_info->semaphore);
RelinquishSemaphoreInfo(&matrix_info->semaphore);
return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction,
% producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns).
% Also represents the number of terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as an 'array of row pointers' of rank size.
% That is, values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is, the matrix is in the form of a 'row first array'.
%
% However 'vectors' is an 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially if only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient-weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficients[8];
% double *vectors[1] = { coefficients };
% ...
% GaussJordanElimination(matrix, vectors, 8UL, 1UL);
%
% However, by specifying more 'columns' (as an 'array of vector columns'),
% you can use this function to solve a set of 'separable' equations.
%
% For example, a distortion function where u = U(x,y) and v = V(x,y).
% The functions U() and V() have separate coefficients, but are being
% generated from a common x,y -> u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix',
% though it is returned as a 'column-first array' rather than a 'row-first
% array'. For details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix,
double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
if ((x) != (y)) \
{ \
(x)+=(y); \
(y)=(x)-(y); \
(x)=(x)-(y); \
} \
}
double
max,
scale;
register ssize_t
i,
j,
k;
ssize_t
column,
*columns,
*pivots,
row,
*rows;
columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
(pivots == (ssize_t *) NULL))
{
if (pivots != (ssize_t *) NULL)
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
if (columns != (ssize_t *) NULL)
columns=(ssize_t *) RelinquishMagickMemory(columns);
if (rows != (ssize_t *) NULL)
rows=(ssize_t *) RelinquishMagickMemory(rows);
return(MagickFalse);
}
(void) memset(columns,0,rank*sizeof(*columns));
(void) memset(rows,0,rank*sizeof(*rows));
(void) memset(pivots,0,rank*sizeof(*pivots));
column=0;
row=0;
for (i=0; i < (ssize_t) rank; i++)
{
max=0.0;
for (j=0; j < (ssize_t) rank; j++)
if (pivots[j] != 1)
{
for (k=0; k < (ssize_t) rank; k++)
if (pivots[k] != 0)
{
            if (pivots[k] > 1)
              {
                /* release work arrays to avoid a leak on failure */
                pivots=(ssize_t *) RelinquishMagickMemory(pivots);
                rows=(ssize_t *) RelinquishMagickMemory(rows);
                columns=(ssize_t *) RelinquishMagickMemory(columns);
                return(MagickFalse);
              }
}
else
if (fabs(matrix[j][k]) >= max)
{
max=fabs(matrix[j][k]);
row=j;
column=k;
}
}
pivots[column]++;
if (row != column)
{
for (k=0; k < (ssize_t) rank; k++)
GaussJordanSwap(matrix[row][k],matrix[column][k]);
for (k=0; k < (ssize_t) number_vectors; k++)
GaussJordanSwap(vectors[k][row],vectors[k][column]);
}
rows[i]=row;
columns[i]=column;
    if (matrix[column][column] == 0.0)
      {
        /* singular matrix: release work arrays before failing */
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
        rows=(ssize_t *) RelinquishMagickMemory(rows);
        columns=(ssize_t *) RelinquishMagickMemory(columns);
        return(MagickFalse);
      }
scale=PerceptibleReciprocal(matrix[column][column]);
matrix[column][column]=1.0;
for (j=0; j < (ssize_t) rank; j++)
matrix[column][j]*=scale;
for (j=0; j < (ssize_t) number_vectors; j++)
vectors[j][column]*=scale;
for (j=0; j < (ssize_t) rank; j++)
if (j != column)
{
scale=matrix[j][column];
matrix[j][column]=0.0;
for (k=0; k < (ssize_t) rank; k++)
matrix[j][k]-=scale*matrix[column][k];
for (k=0; k < (ssize_t) number_vectors; k++)
vectors[k][j]-=scale*vectors[k][column];
}
}
for (j=(ssize_t) rank-1; j >= 0; j--)
if (columns[j] != rows[j])
for (i=0; i < (ssize_t) rank; i++)
GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
pivots=(ssize_t *) RelinquishMagickMemory(pivots);
rows=(ssize_t *) RelinquishMagickMemory(rows);
columns=(ssize_t *) RelinquishMagickMemory(columns);
return(MagickTrue);
}
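/*
  A minimal usage sketch (illustrative only, wrapped in #if 0 so it does not
  compile into MagickCore): solve the 2x2 system  2*c0 + 1*c1 = 5,
  1*c0 + 3*c1 = 10  with GaussJordanElimination().  The helper name
  solve_example_2x2() and the literal values are assumptions for
  illustration, not library API.
*/
#if 0
static MagickBooleanType solve_example_2x2(double *c0,double *c1)
{
  double
    **matrix,
    results[2],
    *vectors[1];

  MagickBooleanType
    status;

  matrix=AcquireMagickMatrix(2UL,2UL);  /* zero-initialized 2x2 */
  if (matrix == (double **) NULL)
    return(MagickFalse);
  matrix[0][0]=2.0;  matrix[0][1]=1.0;
  matrix[1][0]=1.0;  matrix[1][1]=3.0;
  results[0]=5.0;
  results[1]=10.0;
  vectors[0]=results;  /* one column vector of rank 2 */
  status=GaussJordanElimination(matrix,vectors,2UL,1UL);
  if (status != MagickFalse)
    {
      *c0=results[0];  /* == 1.0 */
      *c1=results[1];  /* == 3.0 */
    }
  matrix=RelinquishMagickMatrix(matrix,2UL);
  return(status);
}
#endif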
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
assert(matrix_info != (MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specified element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline MagickOffsetType ReadMatrixElements(
const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
LockSemaphoreInfo(matrix_info->semaphore);
if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
{
UnlockSemaphoreInfo(matrix_info->semaphore);
return((MagickOffsetType) -1);
}
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX));
#else
count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
(MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
#if !defined(MAGICKCORE_HAVE_PREAD)
UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
return(i);
}
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
EdgeX(x,matrix_info->columns);
if (matrix_info->type != DiskCache)
{
(void) memcpy(value,(unsigned char *) matrix_info->elements+i*
matrix_info->stride,matrix_info->stride);
return(MagickTrue);
}
count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associated results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the LeastSquaresAddTerms method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that form the equation being added.
%
% o results: the result(s) that should be generated from the given terms,
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: number of result vectors, and the number of results
% being added. Also represents the number of separable systems of equations
% that are being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors,
const double *terms,const double *results,const size_t rank,
const size_t number_vectors)
{
register ssize_t
i,
j;
for (j=0; j < (ssize_t) rank; j++)
{
for (i=0; i < (ssize_t) rank; i++)
matrix[i][j]+=terms[i]*terms[j];
for (i=0; i < (ssize_t) number_vectors; i++)
vectors[i][j]+=results[i]*terms[j];
}
}
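/*
  A minimal sketch (illustrative only, wrapped in #if 0): fit a straight
  line u = c0*x + c1 to n samples by accumulating the normal equations with
  LeastSquaresAddTerms() and solving with GaussJordanElimination().  The
  helper name fit_line() and its arguments are assumptions, not library API.
*/
#if 0
static MagickBooleanType fit_line(const double *x,const double *u,
  const size_t n,double *c0,double *c1)
{
  double
    **matrix,
    results[1],
    terms[2],
    **vectors;

  MagickBooleanType
    status;

  size_t
    i;

  matrix=AcquireMagickMatrix(2UL,2UL);   /* zero-initialized 2x2 */
  vectors=AcquireMagickMatrix(1UL,2UL);  /* one column vector of rank 2 */
  if ((matrix == (double **) NULL) || (vectors == (double **) NULL))
    {
      matrix=RelinquishMagickMatrix(matrix,2UL);
      vectors=RelinquishMagickMatrix(vectors,1UL);
      return(MagickFalse);
    }
  for (i=0; i < n; i++)
  {
    terms[0]=x[i];
    terms[1]=1.0;
    results[0]=u[i];
    LeastSquaresAddTerms(matrix,vectors,terms,results,2UL,1UL);
  }
  status=GaussJordanElimination(matrix,vectors,2UL,1UL);
  if (status != MagickFalse)
    {
      *c0=vectors[0][0];  /* slope */
      *c1=vectors[0][1];  /* intercept */
    }
  matrix=RelinquishMagickMatrix(matrix,2UL);
  vectors=RelinquishMagickMatrix(vectors,1UL);
  return(status);
}
#endif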
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double; otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
ExceptionInfo *exception)
{
CacheView
*image_view;
double
max_value,
min_value,
scale_factor,
value;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (matrix_info->stride < sizeof(double))
return((Image *) NULL);
/*
Determine range of matrix.
*/
(void) GetMatrixElement(matrix_info,0,0,&value);
min_value=value;
max_value=value;
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
register ssize_t
x;
for (x=0; x < (ssize_t) matrix_info->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
if (value < min_value)
min_value=value;
else
if (value > max_value)
max_value=value;
}
}
if ((min_value == 0.0) && (max_value == 0.0))
scale_factor=0;
else
if (min_value == max_value)
{
scale_factor=(double) QuantumRange/min_value;
min_value=0;
}
else
scale_factor=(double) QuantumRange/(max_value-min_value);
/*
Convert matrix to image.
*/
image=AcquireImage((ImageInfo *) NULL,exception);
image->columns=matrix_info->columns;
image->rows=matrix_info->rows;
image->colorspace=GRAYColorspace;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
value;
register Quantum
*q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
continue;
value=scale_factor*(value-min_value);
*q=ClampToQuantum(value);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
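/*
  Usage sketch (illustrative, wrapped in #if 0): render a double-typed
  matrix as a grayscale image; values are linearly scaled to QuantumRange.
  AcquireMatrixInfo() is assumed to be the constructor defined earlier in
  this file, and the 64x64 size is an arbitrary assumption.
*/
#if 0
{
  double
    value = 1.0;

  ExceptionInfo
    *exception = AcquireExceptionInfo();

  Image
    *image;

  MatrixInfo
    *matrix_info = AcquireMatrixInfo(64,64,sizeof(double),exception);

  if (matrix_info != (MatrixInfo *) NULL)
    {
      (void) SetMatrixElement(matrix_info,32,32,&value);
      image=MatrixToImage(matrix_info,exception);
      if (image != (Image *) NULL)
        image=DestroyImage(image);
      matrix_info=DestroyMatrixInfo(matrix_info);
    }
  exception=DestroyExceptionInfo(exception);
}
#endif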
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
register ssize_t
x;
ssize_t
count,
y;
unsigned char
value;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
if (matrix_info->type != DiskCache)
{
(void) memset(matrix_info->elements,0,(size_t)
matrix_info->length);
return(MagickTrue);
}
value=0;
(void) lseek(matrix_info->file,0,SEEK_SET);
for (y=0; y < (ssize_t) matrix_info->rows; y++)
{
for (x=0; x < (ssize_t) matrix_info->length; x++)
{
count=write(matrix_info->file,&value,sizeof(value));
if (count != (ssize_t) sizeof(value))
break;
}
if (x < (ssize_t) matrix_info->length)
break;
}
return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
const size_t number_rows)
{
register ssize_t
i;
if (matrix == (double **) NULL )
return(matrix);
for (i=0; i < (ssize_t) number_rows; i++)
matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
matrix=(double **) RelinquishMagickMemory(matrix);
return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,const void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
const ssize_t x,const ssize_t y,const void *value)
{
MagickOffsetType
count,
i;
assert(matrix_info != (const MatrixInfo *) NULL);
assert(matrix_info->signature == MagickCoreSignature);
i=(MagickOffsetType) y*matrix_info->columns+x;
if ((i < 0) ||
((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
return(MagickFalse);
if (matrix_info->type != DiskCache)
{
(void) memcpy((unsigned char *) matrix_info->elements+i*
matrix_info->stride,value,matrix_info->stride);
return(MagickTrue);
}
count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
matrix_info->stride,(unsigned char *) value);
if (count != (MagickOffsetType) matrix_info->stride)
return(MagickFalse);
return(MagickTrue);
}
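/*
  A minimal round-trip sketch (illustrative, wrapped in #if 0): write one
  double element and read it back; the same calls work transparently for
  memory-, map-, and disk-backed matrixes.  AcquireMatrixInfo() is assumed
  to be the constructor defined earlier in this file.
*/
#if 0
{
  double
    check = 0.0,
    value = 3.14;

  ExceptionInfo
    *exception = AcquireExceptionInfo();

  MatrixInfo
    *matrix_info = AcquireMatrixInfo(16,16,sizeof(double),exception);

  if (matrix_info != (MatrixInfo *) NULL)
    {
      (void) SetMatrixElement(matrix_info,5,7,&value);
      (void) GetMatrixElement(matrix_info,5,7,&check);  /* check == 3.14 */
      matrix_info=DestroyMatrixInfo(matrix_info);
    }
  exception=DestroyExceptionInfo(exception);
}
#endif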
|
GB_binop__eq_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__eq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__eq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fc64)
// C=scalar+B GB (_bind1st__eq_fc64)
// C=scalar+B' GB (_bind1st_tran__eq_fc64)
// C=A+scalar GB (_bind2nd__eq_fc64)
// C=A'+scalar GB (_bind2nd_tran__eq_fc64)
// C type: bool
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_eq (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = (creal (GBX (Ax, pA, A_iso)) != 0) || (cimag (GBX (Ax, pA, A_iso)) != 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = (creal (GBX (Bx, pB, B_iso)) != 0) || (cimag (GBX (Bx, pB, B_iso)) != 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_eq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_FC64 || GxB_NO_EQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__eq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__eq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__eq_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_eq (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_eq (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_eq (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_eq (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
kpoint.c | /* kpoint.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <stdio.h>
#include <stdlib.h>
#include "mathfunc.h"
#include "kpoint.h"
const int kpt_bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
{ 0, 0, 0},
{ 0, 0, 1},
{ 0, 0, 2},
{ 0, 0, -2},
{ 0, 0, -1},
{ 0, 1, 0},
{ 0, 1, 1},
{ 0, 1, 2},
{ 0, 1, -2},
{ 0, 1, -1},
{ 0, 2, 0},
{ 0, 2, 1},
{ 0, 2, 2},
{ 0, 2, -2},
{ 0, 2, -1},
{ 0, -2, 0},
{ 0, -2, 1},
{ 0, -2, 2},
{ 0, -2, -2},
{ 0, -2, -1},
{ 0, -1, 0},
{ 0, -1, 1},
{ 0, -1, 2},
{ 0, -1, -2},
{ 0, -1, -1},
{ 1, 0, 0},
{ 1, 0, 1},
{ 1, 0, 2},
{ 1, 0, -2},
{ 1, 0, -1},
{ 1, 1, 0},
{ 1, 1, 1},
{ 1, 1, 2},
{ 1, 1, -2},
{ 1, 1, -1},
{ 1, 2, 0},
{ 1, 2, 1},
{ 1, 2, 2},
{ 1, 2, -2},
{ 1, 2, -1},
{ 1, -2, 0},
{ 1, -2, 1},
{ 1, -2, 2},
{ 1, -2, -2},
{ 1, -2, -1},
{ 1, -1, 0},
{ 1, -1, 1},
{ 1, -1, 2},
{ 1, -1, -2},
{ 1, -1, -1},
{ 2, 0, 0},
{ 2, 0, 1},
{ 2, 0, 2},
{ 2, 0, -2},
{ 2, 0, -1},
{ 2, 1, 0},
{ 2, 1, 1},
{ 2, 1, 2},
{ 2, 1, -2},
{ 2, 1, -1},
{ 2, 2, 0},
{ 2, 2, 1},
{ 2, 2, 2},
{ 2, 2, -2},
{ 2, 2, -1},
{ 2, -2, 0},
{ 2, -2, 1},
{ 2, -2, 2},
{ 2, -2, -2},
{ 2, -2, -1},
{ 2, -1, 0},
{ 2, -1, 1},
{ 2, -1, 2},
{ 2, -1, -2},
{ 2, -1, -1},
{-2, 0, 0},
{-2, 0, 1},
{-2, 0, 2},
{-2, 0, -2},
{-2, 0, -1},
{-2, 1, 0},
{-2, 1, 1},
{-2, 1, 2},
{-2, 1, -2},
{-2, 1, -1},
{-2, 2, 0},
{-2, 2, 1},
{-2, 2, 2},
{-2, 2, -2},
{-2, 2, -1},
{-2, -2, 0},
{-2, -2, 1},
{-2, -2, 2},
{-2, -2, -2},
{-2, -2, -1},
{-2, -1, 0},
{-2, -1, 1},
{-2, -1, 2},
{-2, -1, -2},
{-2, -1, -1},
{-1, 0, 0},
{-1, 0, 1},
{-1, 0, 2},
{-1, 0, -2},
{-1, 0, -1},
{-1, 1, 0},
{-1, 1, 1},
{-1, 1, 2},
{-1, 1, -2},
{-1, 1, -1},
{-1, 2, 0},
{-1, 2, 1},
{-1, 2, 2},
{-1, 2, -2},
{-1, 2, -1},
{-1, -2, 0},
{-1, -2, 1},
{-1, -2, 2},
{-1, -2, -2},
{-1, -2, -1},
{-1, -1, 0},
{-1, -1, 1},
{-1, -1, 2},
{-1, -1, -2},
{-1, -1, -1}
};
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3]);
static int get_ir_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT * rot_reciprocal);
static int
get_ir_reciprocal_mesh_openmp(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT* rot_reciprocal);
static int relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
const int mesh[3]);
static int get_grid_point_double_mesh(const int address_double[3],
const int mesh[3]);
static int get_grid_point_single_mesh(const int address[3],
const int mesh[3]);
static void reduce_grid_address(int address[3],
const int address_double[3],
const int mesh[3]);
int kpt_get_grid_point_double_mesh(const int address_double[3],
const int mesh[3])
{
return get_grid_point_double_mesh(address_double, mesh);
}
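/* Illustrative check (assumed values, wrapped in #if 0): on a */
/* Gamma-centered 4x4x4 mesh the grid address {1,2,3} doubles to {2,4,6} */
/* and maps to single-mesh index 3*4*4 + 2*4 + 1 = 57 in the default */
/* (x-fastest) grid order. */
#if 0
{
  const int mesh[3] = {4, 4, 4};
  const int address_double[3] = {2, 4, 6}; /* 2 * {1,2,3} + is_shift */
  int gp = kpt_get_grid_point_double_mesh(address_double, mesh);
  /* gp == 57 unless GRID_ORDER_XYZ is defined */
}
#endif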
/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' corresponds to the index of a grid point. */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
int num_ir;
#ifdef _OPENMP
num_ir = get_ir_reciprocal_mesh_openmp(grid_address,
map,
mesh,
is_shift,
rot_reciprocal);
#else
num_ir = get_ir_reciprocal_mesh(grid_address,
map,
mesh,
is_shift,
rot_reciprocal);
#endif
return num_ir;
}
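/* Usage sketch (hypothetical driver, wrapped in #if 0): irreducible */
/* points of a Gamma-centered 4x4x4 mesh with only the identity */
/* rotation, in which case every one of the 64 points is its own */
/* irreducible representative. */
#if 0
static int example_ir_mesh(void)
{
  int grid_address[64][3], map[64];
  const int mesh[3] = {4, 4, 4};
  const int is_shift[3] = {0, 0, 0}; /* Gamma-centered */
  int i, j, num_ir;
  MatINT *rot = mat_alloc_MatINT(1);
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      rot->mat[0][i][j] = (i == j);
    }
  }
  num_ir = kpt_get_irreducible_reciprocal_mesh(grid_address, map, mesh,
                                               is_shift, rot);
  /* num_ir == 64 and map[i] == i with the identity-only point group */
  mat_free_MatINT(rot);
  return num_ir;
}
#endif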
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const int is_time_reversal,
const MatINT * rotations,
const int num_q,
SPGCONST double qpoints[][3])
{
int num_ir;
MatINT *rot_reciprocal, *rot_reciprocal_q;
double tolerance;
rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
tolerance,
num_q,
qpoints);
#ifdef _OPENMP
num_ir = get_ir_reciprocal_mesh_openmp(grid_address,
map,
mesh,
is_shift,
rot_reciprocal_q);
#else
num_ir = get_ir_reciprocal_mesh(grid_address,
map,
mesh,
is_shift,
rot_reciprocal_q);
#endif
mat_free_MatINT(rot_reciprocal_q);
mat_free_MatINT(rot_reciprocal);
return num_ir;
}
void kpt_get_grid_points_by_rotations(int rot_grid_points[],
const int address_orig[3],
const MatINT * rot_reciprocal,
const int mesh[3],
const int is_shift[3])
{
int i;
int address_double_orig[3], address_double[3];
for (i = 0; i < 3; i++) {
address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
}
for (i = 0; i < rot_reciprocal->size; i++) {
mat_multiply_matrix_vector_i3(address_double,
rot_reciprocal->mat[i],
address_double_orig);
rot_grid_points[i] = get_grid_point_double_mesh(address_double, mesh);
}
}
void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[],
const int address_orig[3],
const MatINT * rot_reciprocal,
const int mesh[3],
const int is_shift[3],
const int bz_map[])
{
int i;
int address_double_orig[3], address_double[3], bzmesh[3];
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
}
for (i = 0; i < rot_reciprocal->size; i++) {
mat_multiply_matrix_vector_i3(address_double,
rot_reciprocal->mat[i],
address_double_orig);
rot_grid_points[i] =
bz_map[get_grid_point_double_mesh(address_double, bzmesh)];
}
}
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
return relocate_BZ_grid_address(bz_grid_address,
bz_map,
grid_address,
mesh,
rec_lattice,
is_shift);
}
MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal)
{
return get_point_group_reciprocal(rotations, is_time_reversal);
}
MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3])
{
return get_point_group_reciprocal_with_q(rot_reciprocal,
symprec,
num_q,
qpoints);
}
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal)
{
int i, j, num_rot;
MatINT *rot_reciprocal, *rot_return;
int *unique_rot;
SPGCONST int inversion[3][3] = {
{-1, 0, 0 },
{ 0,-1, 0 },
{ 0, 0,-1 }
};
if (is_time_reversal) {
rot_reciprocal = mat_alloc_MatINT(rotations->size * 2);
} else {
rot_reciprocal = mat_alloc_MatINT(rotations->size);
}
unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size);
for (i = 0; i < rot_reciprocal->size; i++) {
unique_rot[i] = -1;
}
for (i = 0; i < rotations->size; i++) {
mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);
if (is_time_reversal) {
mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i],
inversion,
rot_reciprocal->mat[i]);
}
}
num_rot = 0;
for (i = 0; i < rot_reciprocal->size; i++) {
for (j = 0; j < num_rot; j++) {
if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]],
rot_reciprocal->mat[i])) {
goto escape;
}
}
unique_rot[num_rot] = i;
num_rot++;
escape:
;
}
rot_return = mat_alloc_MatINT(num_rot);
  for (i = 0; i < num_rot; i++) {
    mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]);
  }
free(unique_rot);
mat_free_MatINT(rot_reciprocal);
return rot_return;
}
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3])
{
int i, j, k, l, is_all_ok, num_rot;
int *ir_rot;
double q_rot[3], diff[3];
MatINT * rot_reciprocal_q;
is_all_ok = 0;
num_rot = 0;
ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size);
for (i = 0; i < rot_reciprocal->size; i++) {
ir_rot[i] = -1;
}
for (i = 0; i < rot_reciprocal->size; i++) {
for (j = 0; j < num_q; j++) {
is_all_ok = 0;
mat_multiply_matrix_vector_id3(q_rot,
rot_reciprocal->mat[i],
qpoints[j]);
for (k = 0; k < num_q; k++) {
for (l = 0; l < 3; l++) {
diff[l] = q_rot[l] - qpoints[k][l];
diff[l] -= mat_Nint(diff[l]);
}
if (mat_Dabs(diff[0]) < symprec &&
mat_Dabs(diff[1]) < symprec &&
mat_Dabs(diff[2]) < symprec) {
is_all_ok = 1;
break;
}
}
if (! is_all_ok) {
break;
}
}
if (is_all_ok) {
ir_rot[num_rot] = i;
num_rot++;
}
}
rot_reciprocal_q = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
rot_reciprocal->mat[ir_rot[i]]);
}
free(ir_rot);
return rot_reciprocal_q;
}
static int get_ir_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
/* In the following loop, mesh is doubled. */
  /* Even and odd numbers in the doubled mesh correspond to */
  /* is_shift[i] = 0 and is_shift[i] = 1, respectively. */
/* is_shift = [0,0,0] gives Gamma center mesh. */
/* grid: reducible grid points */
/* map: the mapping from each point to ir-point. */
int i, j, k, l, grid_point, grid_point_rot, num_ir = 0;
int address[3], address_double[3], address_double_rot[3];
/* "-1" means the element is not touched yet. */
for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
map[i] = -1;
}
#ifndef GRID_ORDER_XYZ
for (i = 0; i < mesh[2]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[0]; k++) {
address[0] = k;
address[1] = j;
address[2] = i;
#else
for (i = 0; i < mesh[0]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[2]; k++) {
address[0] = i;
address[1] = j;
address[2] = k;
#endif
for (l = 0; l < 3; l++) {
address_double[l] = address[l] * 2 + is_shift[l];
}
grid_point = get_grid_point_double_mesh(address_double, mesh);
reduce_grid_address(grid_address[grid_point], address, mesh);
for (l = 0; l < rot_reciprocal->size; l++) {
mat_multiply_matrix_vector_i3(address_double_rot,
rot_reciprocal->mat[l],
address_double);
grid_point_rot = get_grid_point_double_mesh(address_double_rot, mesh);
if (grid_point_rot > -1) { /* Invalid if even --> odd or odd --> even */
if (map[grid_point_rot] > -1) {
map[grid_point] = map[grid_point_rot];
break;
}
}
}
if (map[grid_point] == -1) {
map[grid_point] = grid_point;
num_ir++;
}
}
}
}
return num_ir;
}
static int
get_ir_reciprocal_mesh_openmp(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT * rot_reciprocal)
{
int i, j, k, l, grid_point, grid_point_rot, num_ir;
int address[3], address_double[3], address_double_rot[3];
#ifndef GRID_ORDER_XYZ
#pragma omp parallel for private(j, k, l, grid_point, grid_point_rot, address_double, address_double_rot)
for (i = 0; i < mesh[2]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[0]; k++) {
address[0] = k;
address[1] = j;
address[2] = i;
#else
#pragma omp parallel for private(j, k, l, grid_point, grid_point_rot, address_double, address_double_rot)
for (i = 0; i < mesh[0]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[2]; k++) {
address[0] = i;
address[1] = j;
address[2] = k;
#endif
for (l = 0; l < 3; l++) {
address_double[l] = address[l] * 2 + is_shift[l];
}
grid_point = get_grid_point_double_mesh(address_double, mesh);
map[grid_point] = grid_point;
reduce_grid_address(grid_address[grid_point], address, mesh);
for (l = 0; l < rot_reciprocal->size; l++) {
mat_multiply_matrix_vector_i3(address_double_rot,
rot_reciprocal->mat[l],
address_double);
grid_point_rot = get_grid_point_double_mesh(address_double_rot, mesh);
if (grid_point_rot > -1) { /* Invalid if even --> odd or odd --> even */
if (grid_point_rot < map[grid_point]) {
map[grid_point] = grid_point_rot;
}
}
}
}
}
}
num_ir = 0;
#pragma omp parallel for reduction(+:num_ir)
for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
if (map[i] == i) {
num_ir++;
}
}
return num_ir;
}
/* Relocate grid addresses to first Brillouin zone */
/* bz_grid_address[prod(mesh + 1)][3] */
/* bz_map[prod(mesh * 2)] */
static int relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
double tolerance, min_distance;
double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE];
int bzmesh[3], bz_address_double[3];
int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp;
tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh);
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
}
for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) {
bz_map[i] = -1;
}
boundary_num_gp = 0;
total_num_gp = mesh[0] * mesh[1] * mesh[2];
for (i = 0; i < total_num_gp; i++) {
for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
for (k = 0; k < 3; k++) {
q_vector[k] =
((grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]) * 2 +
is_shift[k]) / ((double)mesh[k]) / 2;
}
mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector);
distance[j] = mat_norm_squared_d3(q_vector);
}
min_distance = distance[0];
min_index = 0;
for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
if (distance[j] < min_distance) {
min_distance = distance[j];
min_index = j;
}
}
for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
if (distance[j] < min_distance + tolerance) {
if (j == min_index) {
gp = i;
} else {
gp = boundary_num_gp + total_num_gp;
}
for (k = 0; k < 3; k++) {
bz_grid_address[gp][k] =
grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k];
bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k];
}
bzgp = get_grid_point_double_mesh(bz_address_double, bzmesh);
bz_map[bzgp] = gp;
if (j != min_index) {
boundary_num_gp++;
}
}
}
}
return boundary_num_gp + total_num_gp;
}
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
const int mesh[3])
{
int i, j;
double tolerance;
double length[3];
for (i = 0; i < 3; i++) {
length[i] = 0;
for (j = 0; j < 3; j++) {
length[i] += rec_lattice[j][i] * rec_lattice[j][i];
}
length[i] /= mesh[i] * mesh[i];
}
tolerance = length[0];
for (i = 1; i < 3; i++) {
if (tolerance < length[i]) {
tolerance = length[i];
}
}
tolerance *= 0.01;
return tolerance;
}
static int get_grid_point_double_mesh(const int address_double[3],
const int mesh[3])
{
int i, address[3];
for (i = 0; i < 3; i++) {
if (address_double[i] % 2 == 0) {
address[i] = address_double[i] / 2;
} else {
address[i] = (address_double[i] - 1) / 2;
}
}
mat_modulo_i3(address, mesh);
return get_grid_point_single_mesh(address, mesh);
}
static int get_grid_point_single_mesh(const int address[3],
const int mesh[3])
{
#ifndef GRID_ORDER_XYZ
return address[2] * mesh[0] * mesh[1] + address[1] * mesh[0] + address[0];
#else
return address[0] * mesh[1] * mesh[2] + address[1] * mesh[2] + address[2];
#endif
}
static void reduce_grid_address(int reduced_address[3],
const int address[3],
const int mesh[3])
{
int i;
for (i = 0; i < 3; i++) {
#ifndef GRID_BOUNDARY_AS_NEGATIVE
reduced_address[i] = address[i] - mesh[i] * (address[i] > mesh[i] / 2);
#else
reduced_address[i] = address[i] - mesh[i] * (address[i] >= mesh[i] / 2);
#endif
}
}
|
pcmemory.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*
* -- SuperLU MT routine (version 2.2) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley,
* and Xerox Palo Alto Research Center.
* September 10, 2007
*
* Last modified:
* -- 8/29/2013: added lock to access Stack memory supplied by user
*
*/
#include "slu_mt_cdefs.h"
/* ------------------
Constants & Macros
------------------ */
#define EXPAND 1.5
#define NO_MEMTYPE 4 /* 0: lusup;
1: ucol;
2: lsub;
3: usub */
#define GluIntArray(n) (9 * (n) + 5)
/* -------------------
Internal prototypes
------------------- */
void *pcgstrf_expand (int_t *, MemType,int_t, int_t, GlobalLU_t *);
void copy_mem_complex (int_t, void *, void *);
void pcgstrf_StackCompress(GlobalLU_t *);
void pcgstrf_SetupSpace (void *, int_t);
void *cuser_malloc (int_t, int_t);
void cuser_free (int_t, int_t);
/* ----------------------------------------------
External prototypes (in memory.c - prec-indep)
---------------------------------------------- */
extern void copy_mem_int (int_t, void *, void *);
extern void user_bcopy (char *, char *, int_t);
typedef struct {
int_t size;
int_t used;
int_t top1; /* grow upward, relative to &array[0] */
int_t top2; /* grow downward */
void *array;
#if ( MACH==PTHREAD )
  pthread_mutex_t lock;
#endif
} LU_stack_t;
typedef enum {HEAD, TAIL} stack_end_t;
typedef enum {SYSTEM, USER} LU_space_t;
ExpHeader *cexpanders = 0; /* Array of pointers to 4 types of memory */
static LU_stack_t stack;
static int_t no_expand;
static int_t ndim;
static LU_space_t whichspace; /* 0 - system malloc'd; 1 - user provided */
/* Macros to manipulate stack */
#define StackFull(x) ( x + stack.used >= stack.size )
#define NotDoubleAlign(addr) ( (long long int)addr & 7 )
#define DoubleAlign(addr) ( ((long long int)addr + 7) & ~7L )
#define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */
/* temporary space used by BLAS calls */
#define NUM_TEMPV(n,w,t,b) (SUPERLU_MAX( 2*n, (t + b)*w ))
/*
* Setup the memory model to be used for factorization.
* lwork = 0: use system malloc;
* lwork > 0: use user-supplied work[] space.
*/
void pcgstrf_SetupSpace(void *work, int_t lwork)
{
if ( lwork == 0 ) {
whichspace = SYSTEM; /* malloc/free */
} else if ( lwork > 0 ) {
whichspace = USER; /* user provided space */
stack.size = lwork;
stack.used = 0;
stack.top1 = 0;
stack.top2 = lwork;
stack.array = (void *) work;
}
#if ( MACH==PTHREAD )
pthread_mutex_init ( &stack.lock, NULL);
#endif
}
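/*
 * Usage sketch (illustrative values, wrapped in #if 0): hand the
 * factorization a user-supplied work[] buffer instead of letting it call
 * malloc().  The 64 MB size is an arbitrary assumption.
 */
#if 0
{
    int_t lwork = 64 * 1024 * 1024;
    void *work = SUPERLU_MALLOC((size_t) lwork);
    pcgstrf_SetupSpace(work, lwork);  /* whichspace == USER */
    /* ... cuser_malloc()/cuser_free() now carve pieces out of work[] ... */
    pcgstrf_StackFree();              /* destroy the user-stack lock */
    SUPERLU_FREE(work);
}
#endif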
/*
* Destroy the lock used for user stack memory.
*/
void pcgstrf_StackFree()
{
#if ( MACH==PTHREAD ) /* Use pthread ... */
if ( whichspace == USER )
pthread_mutex_destroy( &stack.lock );
#endif
}
void *cuser_malloc(int_t bytes, int_t which_end)
{
void *buf;
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
if ( StackFull(bytes) ) {
buf = NULL;
goto end;
}
if ( which_end == HEAD ) {
buf = (char*) stack.array + stack.top1;
stack.top1 += bytes;
} else {
stack.top2 -= bytes;
buf = (char*) stack.array + stack.top2;
}
stack.used += bytes;
end: ;
} /* ---- end critical section ---- */
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
return buf;
}
void cuser_free(int_t bytes, int_t which_end)
{
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
if ( which_end == HEAD ) stack.top1 -= bytes;
else stack.top2 += bytes;
stack.used -= bytes;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
/* Returns the working storage used during factorization */
int_t superlu_cTempSpace(int_t n, int_t w, int_t p)
{
register float tmp, ptmp;
register int_t iword = sizeof(int_t), dword = sizeof(complex);
int_t maxsuper = sp_ienv(3), rowblk = sp_ienv(4);
/* globally shared */
tmp = 14 * n * iword;
/* local to each processor */
ptmp = (2 * w + 5 + NO_MARKER) * n * iword;
ptmp += (n * w + NUM_TEMPV(n,w,maxsuper,rowblk)) * dword;
#if ( PRNTlevel>=1 )
printf("Per-processor work[] %.0f MB\n", ptmp/1024/1024);
#endif
ptmp *= p;
return (tmp + ptmp);
}
/*
* superlu_memusage consists of the following fields:
* o for_lu (float)
* The amount of space used in bytes for L\U data structures.
* o total_needed (float)
* The amount of space needed in bytes to perform factorization.
* o expansions (int)
* The number of memory expansions during the LU factorization.
*/
int_t superlu_cQuerySpace(int_t P, SuperMatrix *L, SuperMatrix *U, int_t panel_size,
superlu_memusage_t *superlu_memusage)
{
SCPformat *Lstore;
NCPformat *Ustore;
register int_t n, iword, dword, lwork;
Lstore = L->Store;
Ustore = U->Store;
n = L->ncol;
iword = sizeof(int_t);
dword = sizeof(complex);
/* L supernodes of type SCP */
superlu_memusage->for_lu = (float) (7*n + 3) * iword
+ (float) Lstore->nzval_colend[n-1] * dword
+ (float) Lstore->rowind_colend[n-1] * iword;
/* U columns of type NCP */
superlu_memusage->for_lu += (2*n + 1) * iword
+ (float) Ustore->colend[n-1] * (dword + iword);
/* Working storage to support factorization */
lwork = superlu_cTempSpace(n, panel_size, P);
superlu_memusage->total_needed = superlu_memusage->for_lu + lwork;
superlu_memusage->expansions = --no_expand;
return 0;
}
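/*
 * Usage sketch (hypothetical driver, wrapped in #if 0): report memory
 * statistics after a factorization.  L, U, panel_size, and nprocs are
 * assumed to come from an earlier pcgstrf() run.
 */
#if 0
{
    superlu_memusage_t mem;
    superlu_cQuerySpace(nprocs, &L, &U, panel_size, &mem);
    printf("for L\\U %.1f MB, total needed %.1f MB, expansions %d\n",
           mem.for_lu / 1e6, mem.total_needed / 1e6, (int) mem.expansions);
}
#endif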
float pcgstrf_memory_use(const int_t nzlmax, const int_t nzumax, const int_t nzlumax)
{
register float iword, dword, t;
iword = sizeof(int_t);
dword = sizeof(complex);
t = 10. * ndim * iword + nzlmax * iword + nzumax * (iword + dword)
+ nzlumax * dword;
return t;
}
/*
* Allocate storage for the data structures common to all factor routines.
* For those unpredictable size, make a guess as FILL * nnz(A).
* Return value:
* If lwork = -1, return the estimated amount of space required;
* otherwise, return the amount of space actually allocated when
* memory allocation failure occurred.
*/
float
pcgstrf_MemInit(int_t n, int_t annz, superlumt_options_t *superlumt_options,
SuperMatrix *L, SuperMatrix *U, GlobalLU_t *Glu)
{
register int_t nprocs = superlumt_options->nprocs;
yes_no_t refact = superlumt_options->refact;
register int_t panel_size = superlumt_options->panel_size;
register int_t lwork = superlumt_options->lwork;
void *work = superlumt_options->work;
int_t iword, dword, retries = 0;
SCPformat *Lstore;
NCPformat *Ustore;
int_t *xsup, *xsup_end, *supno;
int_t *lsub, *xlsub, *xlsub_end;
complex *lusup;
int_t *xlusup, *xlusup_end;
complex *ucol;
int_t *usub, *xusub, *xusub_end;
int_t nzlmax, nzumax, nzlumax;
int_t FILL_LUSUP = sp_ienv(6); /* Guess the fill-in growth for LUSUP */
int_t FILL_UCOL = sp_ienv(7); /* Guess the fill-in growth for UCOL */
int_t FILL_LSUB = sp_ienv(8); /* Guess the fill-in growth for LSUB */
no_expand = 0;
ndim = n;
iword = sizeof(int_t);
dword = sizeof(complex);
if ( !cexpanders )
cexpanders = (ExpHeader *) SUPERLU_MALLOC(NO_MEMTYPE * sizeof(ExpHeader));
if ( refact == NO ) {
/* Guess amount of storage needed by L\U factors. */
if ( FILL_UCOL < 0 ) nzumax = -FILL_UCOL * annz;
else nzumax = FILL_UCOL;
if ( FILL_LSUB < 0 ) nzlmax = -FILL_LSUB * annz;
else nzlmax = FILL_LSUB;
if ( Glu->dynamic_snode_bound == YES ) {
if ( FILL_LUSUP < 0 ) nzlumax = -FILL_LUSUP * annz;
else nzlumax = FILL_LUSUP; /* estimate an upper bound */
} else {
nzlumax = Glu->nzlumax; /* preset as static upper bound */
}
if ( lwork == -1 ) {
return (GluIntArray(n) * iword +
superlu_cTempSpace(n, panel_size, nprocs)
+ (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword);
} else {
pcgstrf_SetupSpace(work, lwork);
}
/* Integer pointers for L\U factors */
if ( whichspace == SYSTEM ) {
xsup = intMalloc(n+1);
xsup_end = intMalloc(n);
supno = intMalloc(n+1);
xlsub = intMalloc(n+1);
xlsub_end = intMalloc(n);
xlusup = intMalloc(n+1);
xlusup_end = intMalloc(n);
xusub = intMalloc(n+1);
xusub_end = intMalloc(n);
} else {
xsup = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xsup_end = (int_t *)cuser_malloc((n) * iword, HEAD);
supno = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xlsub = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xlsub_end = (int_t *)cuser_malloc((n) * iword, HEAD);
xlusup = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xlusup_end = (int_t *)cuser_malloc((n) * iword, HEAD);
xusub = (int_t *)cuser_malloc((n+1) * iword, HEAD);
xusub_end = (int_t *)cuser_malloc((n) * iword, HEAD);
}
lusup = (complex *) pcgstrf_expand( &nzlumax, LUSUP, 0, 0, Glu );
ucol = (complex *) pcgstrf_expand( &nzumax, UCOL, 0, 0, Glu );
lsub = (int_t *) pcgstrf_expand( &nzlmax, LSUB, 0, 0, Glu );
usub = (int_t *) pcgstrf_expand( &nzumax, USUB, 0, 1, Glu );
while ( !ucol || !lsub || !usub ) {
/*SUPERLU_ABORT("Not enough core in LUMemInit()");*/
#if (PRNTlevel==1)
printf(".. pcgstrf_MemInit(): #retries " IFMT "\n", ++retries);
#endif
if ( whichspace == SYSTEM ) {
SUPERLU_FREE(ucol);
SUPERLU_FREE(lsub);
SUPERLU_FREE(usub);
} else {
cuser_free(nzumax*dword+(nzlmax+nzumax)*iword, HEAD);
}
nzumax /= 2; /* reduce request */
nzlmax /= 2;
if ( nzumax < annz/2 ) {
printf("Not enough memory to perform factorization.\n");
return (pcgstrf_memory_use(nzlmax, nzumax, nzlumax) + n);
}
ucol = (complex *) pcgstrf_expand( &nzumax, UCOL, 0, 0, Glu );
lsub = (int_t *) pcgstrf_expand( &nzlmax, LSUB, 0, 0, Glu );
usub = (int_t *) pcgstrf_expand( &nzumax, USUB, 0, 1, Glu );
}
if ( !lusup ) {
float t = pcgstrf_memory_use(nzlmax, nzumax, nzlumax) + n;
printf("Not enough memory to perform factorization .. "
"need %.1f GBytes\n", t*1e-9);
fflush(stdout);
return (t);
}
} else { /* refact == YES */
Lstore = L->Store;
Ustore = U->Store;
xsup = Lstore->sup_to_colbeg;
xsup_end = Lstore->sup_to_colend;
supno = Lstore->col_to_sup;
xlsub = Lstore->rowind_colbeg;
xlsub_end= Lstore->rowind_colend;
xlusup = Lstore->nzval_colbeg;
xlusup_end= Lstore->nzval_colend;
xusub = Ustore->colbeg;
xusub_end= Ustore->colend;
nzlmax = Glu->nzlmax; /* max from previous factorization */
nzumax = Glu->nzumax;
nzlumax = Glu->nzlumax;
if ( lwork == -1 ) {
return (GluIntArray(n) * iword + superlu_cTempSpace(n, panel_size, nprocs)
+ (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword);
} else if ( lwork == 0 ) {
whichspace = SYSTEM;
} else {
whichspace = USER;
stack.size = lwork;
stack.top2 = lwork;
}
lsub = cexpanders[LSUB].mem = Lstore->rowind;
lusup = cexpanders[LUSUP].mem = Lstore->nzval;
usub = cexpanders[USUB].mem = Ustore->rowind;
	ucol = cexpanders[UCOL].mem = Ustore->nzval;
cexpanders[LSUB].size = nzlmax;
cexpanders[LUSUP].size = nzlumax;
cexpanders[USUB].size = nzumax;
cexpanders[UCOL].size = nzumax;
}
Glu->xsup = xsup;
Glu->xsup_end = xsup_end;
Glu->supno = supno;
Glu->lsub = lsub;
Glu->xlsub = xlsub;
Glu->xlsub_end = xlsub_end;
Glu->lusup = lusup;
Glu->xlusup = xlusup;
Glu->xlusup_end = xlusup_end;
Glu->ucol = ucol;
Glu->usub = usub;
Glu->xusub = xusub;
Glu->xusub_end = xusub_end;
Glu->nzlmax = nzlmax;
Glu->nzumax = nzumax;
Glu->nzlumax = nzlumax;
++no_expand;
#if ( PRNTlevel>=1 )
printf(".. pcgstrf_MemInit() refact %d, whichspace %d, nzlumax " IFMT ", nzumax " IFMT ", nzlmax " IFMT "\n",
refact, whichspace, nzlumax, nzumax, nzlmax);
printf(".. pcgstrf_MemInit() FILL_LUSUP " IFMT ", FILL_UCOL " IFMT ", FILL_LSUB " IFMT "\n",
FILL_LUSUP, FILL_UCOL, FILL_LSUB);
fflush(stdout);
#endif
return 0;
} /* pcgstrf_MemInit */
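/*
 * Usage sketch (hypothetical driver, wrapped in #if 0): with
 * superlumt_options->lwork set to -1, pcgstrf_MemInit() performs no
 * allocation and returns the estimated number of bytes needed; a return
 * of 0 from a normal call means all structures were allocated.
 */
#if 0
{
    float bytes_needed;
    superlumt_options->lwork = -1;
    bytes_needed = pcgstrf_MemInit(n, annz, superlumt_options, L, U, Glu);
    printf("estimated space: %.1f MB\n", bytes_needed / 1e6);
}
#endif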
/*
* Allocate known working storage. Returns 0 if success, otherwise
* returns the number of bytes allocated so far when failure occurred.
*/
int_t
pcgstrf_WorkInit(int_t n, int_t panel_size, int_t **iworkptr, complex **dworkptr)
{
int_t isize, dsize, extra;
complex *old_ptr;
int_t maxsuper = sp_ienv(3),
rowblk = sp_ienv(4);
isize = (2*panel_size + 5 + NO_MARKER) * n * sizeof(int_t);
dsize = (n * panel_size +
NUM_TEMPV(n,panel_size,maxsuper,rowblk)) * sizeof(complex);
if ( whichspace == SYSTEM )
*iworkptr = (int_t *) intCalloc(isize/sizeof(int_t));
else
*iworkptr = (int_t *) cuser_malloc(isize, TAIL);
if ( ! *iworkptr ) {
fprintf(stderr, "pcgstrf_WorkInit: malloc fails for local iworkptr[]\n");
return (isize + n);
}
if ( whichspace == SYSTEM )
*dworkptr = (complex *) SUPERLU_MALLOC((size_t) dsize);
else {
*dworkptr = (complex *) cuser_malloc(dsize, TAIL);
if ( NotDoubleAlign(*dworkptr) ) {
old_ptr = *dworkptr;
*dworkptr = (complex*) DoubleAlign(*dworkptr);
*dworkptr = (complex*) ((double*)*dworkptr - 1);
extra = (char*)old_ptr - (char*)*dworkptr;
#if ( DEBUGlevel>=1 )
printf("pcgstrf_WorkInit: not aligned, extra" IFMT "\n", extra);
#endif
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.top2 -= extra;
stack.used += extra;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
} /* else */
if ( ! *dworkptr ) {
printf("malloc fails for local dworkptr[] ... dsize " IFMT "\n", dsize);
return (isize + dsize + n);
}
return 0;
}
/*
 * Set up pointers for the complex working arrays.
 */
void
pcgstrf_SetRWork(int_t n, int_t panel_size, complex *dworkptr,
complex **dense, complex **tempv)
{
complex zero = {0.0, 0.0};
int_t maxsuper = sp_ienv(3);
int_t rowblk = sp_ienv(4);
*dense = dworkptr;
*tempv = *dense + panel_size*n;
cfill (*dense, n * panel_size, zero);
cfill (*tempv, NUM_TEMPV(n,panel_size,maxsuper,rowblk), zero);
}
/*
* Free the working storage used by factor routines.
*/
void pcgstrf_WorkFree(int_t *iwork, complex *dwork, GlobalLU_t *Glu)
{
if ( whichspace == SYSTEM ) {
SUPERLU_FREE (iwork);
SUPERLU_FREE (dwork);
} else {
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.used -= (stack.size - stack.top2);
stack.top2 = stack.size;
/* pcgstrf_StackCompress(Glu); */
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
}
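/*
 * Illustrative sketch (not part of the library): how pcgstrf_WorkInit,
 * pcgstrf_SetRWork, and pcgstrf_WorkFree cooperate around a
 * factorization.  n, panel_size, and Glu are assumed to come from the
 * caller's context; error handling is abbreviated.
 */
#if 0
    int_t   *iwork;
    complex *dwork, *dense, *tempv;

    if ( pcgstrf_WorkInit(n, panel_size, &iwork, &dwork) != 0 )
        SUPERLU_ABORT("pcgstrf_WorkInit failed");
    pcgstrf_SetRWork(n, panel_size, dwork, &dense, &tempv);

    /* ... panel factorization uses dense[] and tempv[] ... */

    pcgstrf_WorkFree(iwork, dwork, Glu);
#endif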
/*
 * Expand the data structures for L and U during the factorization.
 * Return value:   0 - successful return
 *               > 0 - number of bytes allocated when the routine ran
 *                     out of space
 *
 * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 * !! Warning: Not Implemented in SuperLU_MT !!
 * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 */
int_t
pcgstrf_MemXpand(
int_t jcol,
int_t next, /* number of elements currently in the factors */
MemType mem_type,/* which type of memory to expand */
int_t *maxlen, /* modified - max. length of a data structure */
GlobalLU_t *Glu /* modified - global LU data structures */
)
{
void *new_mem;
#ifdef CHK_EXPAND
printf("pcgstrf_MemXpand(): jcol " IFMT ", next " IFMT ", maxlen " IFMT ", MemType " IFMT "\n",
jcol, next, *maxlen, mem_type);
#endif
if (mem_type == USUB)
new_mem = pcgstrf_expand(maxlen, mem_type, next, 1, Glu);
else
new_mem = pcgstrf_expand(maxlen, mem_type, next, 0, Glu);
if ( !new_mem ) {
int_t nzlmax = Glu->nzlmax;
int_t nzumax = Glu->nzumax;
int_t nzlumax = Glu->nzlumax;
fprintf(stderr, "Can't expand MemType %d : jcol " IFMT "\n",
mem_type, jcol);
return (pcgstrf_memory_use(nzlmax, nzumax, nzlumax) + ndim);
}
switch ( mem_type ) {
case LUSUP:
Glu->lusup = (complex *) new_mem;
Glu->nzlumax = *maxlen;
break;
case UCOL:
Glu->ucol = (complex *) new_mem;
Glu->nzumax = *maxlen;
break;
case LSUB:
Glu->lsub = (int_t *) new_mem;
Glu->nzlmax = *maxlen;
break;
case USUB:
Glu->usub = (int_t *) new_mem;
Glu->nzumax = *maxlen;
break;
}
return 0;
}
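/*
 * Illustrative sketch (not part of the library; the warning above notes
 * this path is not implemented in SuperLU_MT): the intended usage
 * pattern in a kernel that overflows lusup[] while filling column jcol.
 * nextlu, new_entries, nzlumax, mem_error, lusup, and Glu are assumed
 * from the caller's context.
 */
#if 0
    while ( nextlu + new_entries > nzlumax ) {
        mem_error = pcgstrf_MemXpand(jcol, nextlu, LUSUP, &nzlumax, Glu);
        if ( mem_error ) return mem_error; /* bytes allocated so far */
        lusup = Glu->lusup;                /* the base may have moved */
    }
#endif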
void
copy_mem_complex(int_t howmany, void *old, void *new)
{
register int_t i;
complex *dold = old;
complex *dnew = new;
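/* Plain forward copy.  With overlapping regions this is safe only when
   dnew <= dold, which holds for the downward moves performed by
   pcgstrf_StackCompress(). */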
for (i = 0; i < howmany; i++) dnew[i] = dold[i];
}
/*
* Expand the existing storage to accommodate more fill-ins.
*/
void
*pcgstrf_expand(
int_t *prev_len, /* length used from previous call */
MemType type, /* which part of the memory to expand */
int_t len_to_copy, /* size of memory to be copied to new store */
int_t keep_prev, /* = 1: use prev_len;
= 0: compute new_len to expand */
GlobalLU_t *Glu /* modified - global LU data structures */
)
{
double alpha = EXPAND;
void *new_mem, *old_mem;
int_t new_len, tries, lword, extra, bytes_to_copy;
void *ret = NULL;
if ( no_expand == 0 || keep_prev ) /* first allocation, or the caller fixed the length */
new_len = *prev_len;
else {
new_len = alpha * *prev_len;
}
if ( type == LSUB || type == USUB ) lword = sizeof(int_t);
else lword = sizeof(complex);
if ( whichspace == SYSTEM ) {
new_mem = (void *) SUPERLU_MALLOC( (size_t) new_len * lword );
if ( no_expand != 0 ) {
tries = 0;
if ( keep_prev ) {
if ( !new_mem ) return (NULL);
} else {
while ( !new_mem ) {
if ( ++tries > 10 ) return (NULL);
alpha = Reduce(alpha);
new_len = alpha * *prev_len;
new_mem = (void *) SUPERLU_MALLOC((size_t) new_len * lword);
}
}
if ( type == LSUB || type == USUB ) {
copy_mem_int(len_to_copy, cexpanders[type].mem, new_mem);
} else {
copy_mem_complex(len_to_copy, cexpanders[type].mem, new_mem);
}
SUPERLU_FREE (cexpanders[type].mem);
}
cexpanders[type].mem = (void *) new_mem;
} else { /* whichspace == USER */
if ( no_expand == 0 ) {
new_mem = cuser_malloc(new_len * lword, HEAD);
if ( NotDoubleAlign(new_mem) &&
(type == LUSUP || type == UCOL) ) {
old_mem = new_mem;
new_mem = (void *)DoubleAlign(new_mem);
extra = (char*)new_mem - (char*)old_mem;
#ifdef CHK_EXPAND
printf("expand(): not aligned, extra " IFMT "\n", extra);
#endif
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_lock( &stack.lock );
#elif ( MACH==OPENMP ) /* Use openMP ... */
#pragma omp critical ( STACK_LOCK )
#endif
{
stack.top1 += extra;
stack.used += extra;
}
#if ( MACH==PTHREAD ) /* Use pthread ... */
pthread_mutex_unlock( &stack.lock );
#endif
}
cexpanders[type].mem = (void *) new_mem;
} else {
tries = 0;
extra = (new_len - *prev_len) * lword;
if ( keep_prev ) {
if ( StackFull(extra) ) {
new_len = 0;
cexpanders[type].mem = NULL;
return NULL;
}
} else {
while ( StackFull(extra) ) {
if ( ++tries > 10 ) {
new_len = 0;
cexpanders[type].mem = NULL;
return NULL;
}
alpha = Reduce(alpha);
new_len = alpha * *prev_len;
extra = (new_len - *prev_len) * lword;
}
}
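/*
 * Grow section `type` in place: the sections stored above it in the
 * head of the stack (layout order LUSUP, UCOL, LSUB, USUB) are slid up
 * by `extra` bytes with one block move, and their base pointers are
 * adjusted below.
 */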
if ( type != USUB ) {
new_mem = (void*)((char*)cexpanders[type + 1].mem + extra);
bytes_to_copy = (char*)stack.array + stack.top1
- (char*)cexpanders[type + 1].mem;
user_bcopy(cexpanders[type+1].mem, new_mem, bytes_to_copy);
if ( type < USUB ) {
Glu->usub = cexpanders[USUB].mem =
(void*)((char*)cexpanders[USUB].mem + extra);
}
if ( type < LSUB ) {
Glu->lsub = cexpanders[LSUB].mem =
(void*)((char*)cexpanders[LSUB].mem + extra);
}
if ( type < UCOL ) {
Glu->ucol = cexpanders[UCOL].mem =
(void*)((char*)cexpanders[UCOL].mem + extra);
}
stack.top1 += extra;
stack.used += extra;
if ( type == UCOL ) {
stack.top1 += extra; /* Add same amount for USUB */
stack.used += extra;
}
} /* if ... */
} /* else ... */
} /* else, whichspace == USER */
#ifdef DEBUG
printf("pcgstrf_expand[type " IFMT "]\n", type);
#endif
cexpanders[type].size = new_len;
*prev_len = new_len;
if ( no_expand ) ++no_expand;
return (void *) cexpanders[type].mem;
} /* expand */
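/*
 * Layout of the user-supplied work array (whichspace == USER), as
 * managed by the routines above:
 *
 *   stack.array                                          stack.size
 *   | LUSUP | UCOL | LSUB | USUB |-> top1  ...free...  top2 <-| TAIL |
 *
 * The L/U sections grow upward from the head via cuser_malloc(., HEAD);
 * temporary storage (e.g. in pcgstrf_WorkInit) grows downward from the
 * tail via cuser_malloc(., TAIL).
 */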
/*
* Compress the work[] array to remove fragmentation.
*/
void
pcgstrf_StackCompress(GlobalLU_t *Glu)
{
register int_t iword, dword;
char *last;
long long int fragment; /* number of bytes reclaimed by compression */
int_t *ifrom, *ito;
complex *dfrom, *dto;
int_t *xlsub, *lsub, *xusub_end, *usub, *xlusup;
complex *ucol, *lusup;
iword = sizeof(int_t);
dword = sizeof(complex);
xlsub = Glu->xlsub;
lsub = Glu->lsub;
xusub_end = Glu->xusub_end;
usub = Glu->usub;
xlusup = Glu->xlusup;
ucol = Glu->ucol;
lusup = Glu->lusup;
dfrom = ucol;
dto = (complex *)((char*)lusup + xlusup[ndim] * dword);
copy_mem_complex(xusub_end[ndim-1], dfrom, dto);
ucol = dto;
ifrom = lsub;
ito = (int_t *) ((char*)ucol + xusub_end[ndim-1] * iword);
copy_mem_int(xlsub[ndim], ifrom, ito);
lsub = ito;
ifrom = usub;
ito = (int_t *) ((char*)lsub + xlsub[ndim] * iword);
copy_mem_int(xusub_end[ndim-1], ifrom, ito);
usub = ito;
last = (char*)usub + xusub_end[ndim-1] * iword;
/* fragment = bytes between the packed data and the old top of the
   head stack */
fragment = ((char*)stack.array + stack.top1) - last;
stack.used -= fragment;
stack.top1 -= fragment;
Glu->ucol = ucol;
Glu->lsub = lsub;
Glu->usub = usub;
#ifdef CHK_EXPAND
printf("pcgstrf_StackCompress: fragment " IFMT "\n", fragment);
/* PrintStack("After compress", Glu);
for (last = 0; last < ndim; ++last)
print_lu_col("After compress:", last, 0);*/
#endif
}
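/*
 * Sketch of what pcgstrf_StackCompress() does to the head of the stack:
 *
 *   before:  | lusup |gap| ucol |gap| lsub |gap| usub |gap|  <- top1
 *   after:   | lusup | ucol | lsub | usub |                  <- top1
 *
 * ucol, lsub, and usub are packed immediately after the used portion of
 * lusup, and top1/used shrink by the reclaimed fragment.
 */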
/*
* Allocate storage for original matrix A
*/
void
callocateA(int_t n, int_t nnz, complex **a, int_t **asub, int_t **xa)
{
*a = (complex *) complexMalloc(nnz);
*asub = (int_t *) intMalloc(nnz);
*xa = (int_t *) intMalloc(n+1);
}
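/*
 * Illustrative sketch (not part of the library): allocating the
 * compressed-column skeleton of an n-by-n matrix with nnz nonzeros.
 * The caller fills a[], asub[], and xa[] before building a SuperMatrix.
 */
#if 0
    complex *a;
    int_t   *asub, *xa;

    callocateA(n, nnz, &a, &asub, &xa);
    /* ... fill a[0..nnz-1], asub[0..nnz-1], and xa[0..n] ... */
    SUPERLU_FREE(a);
    SUPERLU_FREE(asub);
    SUPERLU_FREE(xa);
#endif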
complex *complexMalloc(int_t n)
{
complex *buf;
buf = (complex *) SUPERLU_MALLOC( (size_t) n * sizeof(complex) );
if ( !buf ) {
fprintf(stderr, "SUPERLU_MALLOC failed for buf in complexMalloc()");
exit (1);
}
return (buf);
}
complex *complexCalloc(int_t n)
{
complex *buf;
register int_t i;
complex zero = {0.0, 0.0};
buf = (complex *) SUPERLU_MALLOC( (size_t) n * sizeof(complex) );
if ( !buf ) {
fprintf(stderr, "SUPERLU_MALLOC failed for buf in complexCalloc()");
exit (1);
}
for (i = 0; i < n; ++i) buf[i] = zero;
return (buf);
}
/*
* Set up memory image in lusup[*], using the supernode boundaries in
* the Householder matrix.
*
* In both static and dynamic scheme, the relaxed supernodes (leaves)
* are stored in the beginning of lusup[*]. In the static scheme, the
* memory is also set aside for the internal supernodes using upper
* bound information from H. In the dynamic scheme, however, the memory
* for the internal supernodes is not allocated by this routine.
*
* Return value
* o Static scheme: number of nonzeros of all the supernodes in H.
* o Dynamic scheme: number of nonzeros of the relaxed supernodes.
*/
int_t
cPresetMap(
const int_t n,
SuperMatrix *A, /* original matrix permuted by columns */
pxgstrf_relax_t *pxgstrf_relax, /* relaxed supernodes */
superlumt_options_t *superlumt_options, /* input */
GlobalLU_t *Glu /* modified */
)
{
register int_t i, j, k, w, rs, rs_lastcol, krow, kmark, maxsup, nextpos;
register int_t rs_nrow; /* number of nonzero rows in a relaxed supernode */
int_t *marker, *asub, *xa_begin, *xa_end;
NCPformat *Astore;
int_t *map_in_sup; /* memory mapping function; values irrelevant on entry. */
int_t *colcnt; /* column count of Lc or H */
int_t *super_bnd; /* supernodes partition in H */
char *snode_env; /* getenv() is declared in <stdlib.h> */
snode_env = getenv("SuperLU_DYNAMIC_SNODE_STORE");
if ( snode_env != NULL ) {
Glu->dynamic_snode_bound = YES;
#if ( PRNTlevel>=1 )
printf(".. Use dynamic alg. to allocate storage for L supernodes.\n");
#endif
} else Glu->dynamic_snode_bound = NO;
Astore = A->Store;
asub = Astore->rowind;
xa_begin = Astore->colbeg;
xa_end = Astore->colend;
rs = 1;
marker = intMalloc(n);
ifill(marker, n, EMPTY);
map_in_sup = Glu->map_in_sup = intCalloc(n+1);
colcnt = superlumt_options->colcnt_h;
super_bnd = superlumt_options->part_super_h;
nextpos = 0;
/* Split large supernode into smaller pieces */
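/* e.g., with maxsup = 4, a supernode of width 10 is split into pieces
   of widths 2, 4, 4: the remainder comes first so that the later
   pieces are full-sized. */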
maxsup = sp_ienv(3);
for (j = 0; j < n; ) {
w = super_bnd[j];
k = j + w;
if ( w > maxsup ) {
w = w % maxsup;
if ( w == 0 ) w = maxsup;
while ( j < k ) {
super_bnd[j] = w;
j += w;
w = maxsup;
}
}
j = k;
}
for (j = 0; j < n; j += w) {
if ( Glu->dynamic_snode_bound == NO ) map_in_sup[j] = nextpos;
if ( pxgstrf_relax[rs].fcol == j ) {
/* Column j starts a relaxed supernode. */
map_in_sup[j] = nextpos;
rs_nrow = 0;
w = pxgstrf_relax[rs++].size;
rs_lastcol = j + w;
for (i = j; i < rs_lastcol; ++i) {
/* for each nonzero in A[*,i] */
for (k = xa_begin[i]; k < xa_end[i]; k++) {
krow = asub[k];
kmark = marker[krow];
if ( kmark != j ) { /* first time visit krow */
marker[krow] = j;
++rs_nrow;
}
}
}
nextpos += w * rs_nrow;
/* Find the next H-supernode, with leading column i, which is
outside the relaxed supernode, rs. */
for (i = j; i < rs_lastcol; k = i, i += super_bnd[i]);
if ( i > rs_lastcol ) {
/* The w columns in [rs_lastcol, i) may be merged into the
   preceding relaxed supernode; make sure we leave enough
   room for the combined supernode. */
w = i - rs_lastcol;
nextpos += w * SUPERLU_MAX( rs_nrow, colcnt[k] );
}
w = i - j;
} else { /* Column j starts a supernode in H */
w = super_bnd[j];
if ( Glu->dynamic_snode_bound == NO ) nextpos += w * colcnt[j];
}
/* Set up the offset (negative) to the leading column j of a
supernode in H */
for (i = 1; i < w; ++i) map_in_sup[j + i] = -i;
} /* for j ... */
if ( Glu->dynamic_snode_bound == YES ) Glu->nextlu = nextpos;
else map_in_sup[n] = nextpos;
#if ( PRNTlevel>=1 )
printf("** PresetMap() allocates " IFMT " reals to lusup[*]....\n", nextpos);
#endif
SUPERLU_FREE (marker);
return nextpos;
}
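/*
 * Illustrative note (not part of the library): map_in_sup[] encodes,
 * for each column, where its supernode's storage begins in lusup[].
 * For a supernode of width 3 whose leading column is j and whose
 * storage starts at offset p:
 *
 *     map_in_sup[j]   =  p
 *     map_in_sup[j+1] = -1
 *     map_in_sup[j+2] = -2
 *
 * so a non-leading column i recovers the base as
 * map_in_sup[i + map_in_sup[i]].
 */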