source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
c-omp.c | /* This file contains routines to construct GNU OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "c-common.h"
#include "toplev.h"
#include "gimple.h"
#include "bitmap.h"
#include "langhooks.h"
/* Complete a #pragma omp master construct. STMT is the structured-block
that follows the pragma. */
tree
c_finish_omp_master (tree stmt)
{
  /* Wrap the structured block in an OMP_MASTER node and append it to the
     current statement list.  */
  tree master_stmt = build1 (OMP_MASTER, void_type_node, stmt);
  return add_stmt (master_stmt);
}
/* Complete a #pragma omp critical construct. STMT is the structured-block
that follows the pragma, NAME is the identifier in the pragma, or null
if it was omitted. */
tree
c_finish_omp_critical (tree body, tree name)
{
  tree node;

  /* Build an OMP_CRITICAL node carrying the structured block and the
     (possibly NULL) critical-section name, then add it to the current
     statement list.  */
  node = make_node (OMP_CRITICAL);
  TREE_TYPE (node) = void_type_node;
  OMP_CRITICAL_BODY (node) = body;
  OMP_CRITICAL_NAME (node) = name;
  return add_stmt (node);
}
/* Complete a #pragma omp ordered construct. STMT is the structured-block
that follows the pragma. */
tree
c_finish_omp_ordered (tree stmt)
{
  /* Wrap the structured block in an OMP_ORDERED node and append it to the
     current statement list.  */
  tree ordered_stmt = build1 (OMP_ORDERED, void_type_node, stmt);
  return add_stmt (ordered_stmt);
}
/* Complete a #pragma omp barrier construct. */
void
c_finish_omp_barrier (void)
{
  /* Emit a call to the libgomp barrier entry point.  */
  tree call = build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
  add_stmt (call);
}
/* Complete a #pragma omp taskwait construct. */
void
c_finish_omp_taskwait (void)
{
  /* Emit a call to the libgomp taskwait entry point.  */
  tree call = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASKWAIT], 0);
  add_stmt (call);
}
/* Complete a #pragma omp atomic construct. The expression to be
implemented atomically is LHS code= RHS. The value returned is
either error_mark_node (if the construct was erroneous) or an
OMP_ATOMIC node which should be added to the current statement tree
with add_stmt. */
tree
c_finish_omp_atomic (enum tree_code code, tree lhs, tree rhs)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error ("invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (input_location, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  /* save_expr so the address computation is shared between the load and the
     store sides of the atomic update rather than evaluated twice.  */
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (input_location, addr, NULL);

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  x = build_modify_expr (input_location, lhs, code, rhs);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  /* Only the fully-converted right-hand side is kept; the atomic
     load/modify/store sequence itself is materialized later from the
     OMP_ATOMIC node (see the comment below).  */
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  return build2 (OMP_ATOMIC, void_type_node, addr, rhs);
}
/* Complete a #pragma omp flush construct. We don't do anything with the
variable list that the syntax allows. */
void
c_finish_omp_flush (void)
{
  /* A flush lowers to a full memory synchronization built-in; the optional
     variable list in the pragma is ignored by the caller.  */
  tree call = build_call_expr (built_in_decls[BUILT_IN_SYNCHRONIZE], 0);
  add_stmt (call);
}
/* Check and canonicalize #pragma omp for increment expression.
Helper function for c_finish_omp_for. */
/* Check and canonicalize #pragma omp for increment expression.
   Helper for c_finish_omp_for.  Returns an expression equivalent to
   EXP - DECL (the per-iteration step), or error_mark_node if EXP is not
   of a recognized form.  */

static tree
check_omp_for_incr_expr (tree exp, tree decl)
{
  tree inner;

  /* Only integral expressions at least as wide as DECL can be taken
     apart safely.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* DECL itself contributes a zero step.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      /* Look through conversions, re-applying them to the result.  */
      inner = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (inner != error_mark_node)
	return fold_convert (TREE_TYPE (exp), inner);
      break;

    case MINUS_EXPR:
      /* (a - b): DECL may only appear on the left.  */
      inner = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (inner != error_mark_node)
	return fold_build2 (MINUS_EXPR, TREE_TYPE (exp),
			    inner, TREE_OPERAND (exp, 1));
      break;

    case PLUS_EXPR:
      /* (a + b): DECL may appear on either side.  */
      inner = check_omp_for_incr_expr (TREE_OPERAND (exp, 0), decl);
      if (inner != error_mark_node)
	return fold_build2 (PLUS_EXPR, TREE_TYPE (exp),
			    inner, TREE_OPERAND (exp, 1));
      inner = check_omp_for_incr_expr (TREE_OPERAND (exp, 1), decl);
      if (inner != error_mark_node)
	return fold_build2 (PLUS_EXPR, TREE_TYPE (exp),
			    TREE_OPERAND (exp, 0), inner);
      break;

    default:
      break;
    }

  return error_mark_node;
}
/* Validate and emit code for the OpenMP directive #pragma omp for.
DECLV is a vector of iteration variables, for each collapsed loop.
INITV, CONDV and INCRV are vectors containing initialization
expressions, controlling predicates and increment expressions.
BODY is the body of the loop and PRE_BODY statements that go before
the loop. */
tree
c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
		  tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  /* All four vectors are parallel: one entry per collapsed loop.  */
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));

  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      /* Prefer the most specific location available for diagnostics.  */
      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);
	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      /* Substitute zero so processing can continue after the error.  */
	      init = integer_zero_node;
	      fail = true;
	    }
	  init = build_modify_expr (elocus, decl, NOP_EXPR, init);
	}
      /* From here on INIT is always an assignment to DECL.  */
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      gcc_assert (TREE_OPERAND (init, 0) == decl);

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  /* DECL was promoted on the left: strip the conversion and
		     convert the other operand instead.  */
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
				   TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  /* Same, with DECL promoted on the right.  */
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
				   TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize so DECL is always the first operand.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* != and == are only acceptable for integral DECL when the
		 bound is the type's extreme value, in which case they can be
		 rewritten as an inequality.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    cond_ok = false;
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else
		    cond_ok = false;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      /* Pointer increments carry a byte step in operand 1; rewrite
		 them as an explicit POINTER_PLUS_EXPR assignment.  */
	      if (POINTER_TYPE_P (TREE_TYPE (decl))
		  && TREE_OPERAND (incr, 1))
		{
		  tree t = fold_convert (sizetype, TREE_OPERAND (incr, 1));
		  if (TREE_CODE (incr) == POSTDECREMENT_EXPR
		      || TREE_CODE (incr) == PREDECREMENT_EXPR)
		    t = fold_build1 (NEGATE_EXPR, sizetype, t);
		  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t);
		  incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		}
	      break;

	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      /* "v = v" is not an increment.  */
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Otherwise try to extract the step expression and rebuild
		     the increment as v = v + step.  */
		  tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store back the possibly-canonicalized expressions.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      /* Everything checked out: build the OMP_FOR node.  */
      tree t = make_node (OMP_FOR);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}
/* Divide CLAUSES into two lists: those that apply to a parallel construct,
and those that apply to a work-sharing construct. Place the results in
*PAR_CLAUSES and *WS_CLAUSES respectively. In addition, add a nowait
clause to the work-sharing list. */
/* Divide CLAUSES into two lists: those that apply to a parallel construct,
   and those that apply to a work-sharing construct.  Place the results in
   *PAR_CLAUSES and *WS_CLAUSES respectively.  In addition, add a nowait
   clause to the work-sharing list.  */

void
c_split_parallel_clauses (tree clauses, tree *par_clauses, tree *ws_clauses)
{
  tree chain;

  *par_clauses = NULL;
  /* The work-sharing list always begins with an implicit nowait clause.  */
  *ws_clauses = build_omp_clause (OMP_CLAUSE_NOWAIT);

  while (clauses)
    {
      /* Save the chain before relinking the clause onto its new list.  */
      chain = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* Data-sharing and parallel-control clauses go on the parallel.  */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_DEFAULT:
	  OMP_CLAUSE_CHAIN (clauses) = *par_clauses;
	  *par_clauses = clauses;
	  break;

	/* Loop-control clauses go on the work-sharing construct.  */
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	  OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
	  *ws_clauses = clauses;
	  break;

	default:
	  gcc_unreachable ();
	}

      clauses = chain;
    }
}
/* True if OpenMP sharing attribute of DECL is predetermined. */
/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  return (TREE_READONLY (decl)
	  ? OMP_CLAUSE_DEFAULT_SHARED
	  : OMP_CLAUSE_DEFAULT_UNSPECIFIED);
}
|
prueba_tasks.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"
void llama_tarea(int p);
void tarea(int p);
/* Entry point: spawn P threads, then have a single thread start the
   recursive task generation in llama_tarea.  */
int main()
{
  int p = 3;  /* thread count, also the depth of the recursive task tree */
  omp_set_num_threads(p);
#pragma omp parallel
#pragma omp single
  llama_tarea(p);
  return 0;
}
/* Run one unit of work at level P, then recursively spawn two child
   tasks at level P-1 until the level counter reaches zero.  */
void llama_tarea(int p)
{
  tarea(p);
  if (p <= 0)
    return;
#pragma omp task
  llama_tarea(p - 1);
#pragma omp task
  llama_tarea(p - 1);
}
/* Print which thread (of how many) is executing iteration P.  */
void tarea(int p)
{
  int tid = omp_get_thread_num();
  int nth = omp_get_num_threads();
#pragma omp single
  printf("soy el hilo %d de %d en la iteración %d\n", tid, nth, p);
}
|
LAGraph_bfs_both.c | //------------------------------------------------------------------------------
// LAGraph_bfs_both: push-pull breadth-first search: DOES BOTH PUSH AND PULL
//
// THIS CODE IS SLOW because it does BOTH the PUSH and the PULL step at all
// levels, just to time both methods.
//
// !!!!!!!!!!!!!!! DO NOT BENCHMARK !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjancency.
// A push step requires the out-adjacencies of each node, where as
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = Gxb_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mvx(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSR for, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mvx(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(i)
x = x + AT (:,j) * q
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended on being a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likely, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraphs asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probabilty p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
#include "LAGraph_internal.h"
/* Release every vector this function may have allocated (result, temp,
   queue, parent).  Invoked by the LAGRAPH_ERROR handling on any failure
   path; presumably GrB_free on a still-NULL handle is a harmless no-op —
   TODO confirm against the GraphBLAS library in use.  */
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&v) ; \
GrB_free (&t) ; \
GrB_free (&q) ; \
GrB_free (&pi) ; \
}
//------------------------------------------------------------------------------
// LAGraph_bfs_both: instrumented push-pull BFS
//------------------------------------------------------------------------------

// Computes a BFS from 'source': v(i) = BFS level of node i (source gets
// level 1), and optionally pi(i) = parent of node i, plus one.  Unlike a
// production push-pull BFS, at every level this variant executes BOTH the
// pull step (mxv with AT) and the push step (vxm with A), keeps only the
// push result, and logs "nq nvisited t_pull t_push" to 'logfile'.  It is a
// benchmarking harness, not the fastest implementation.
// NOTE(review): vector t is declared and freed but never assigned here;
// presumably retained for LAGRAPH_FREE_ALL symmetry -- confirm.
GrB_Info LAGraph_bfs_both       // push-pull BFS, or push-only if AT = NULL
(
    GrB_Vector *v_output,   // v(i) is the BFS level of node i in the graph
    GrB_Vector *pi_output,  // pi(i) = p+1 if p is the parent of node i.
                            // if NULL, the parent is not computed.
    GrB_Matrix A,           // input graph, treated as if boolean in semiring
    GrB_Matrix AT,          // transpose of A (optional; push-only if NULL)
    int64_t source,         // starting node of the BFS
    int64_t max_level,      // optional limit of # levels to search
    bool vsparse            // if true, v is expected to be very sparse
    , FILE * logfile        // receives one "nq nvisited t_pull t_push" line per level
)
{

#if defined ( GxB_SUITESPARSE_GRAPHBLAS ) && ( GxB_IMPLEMENTATION >= GxB_VERSION (5,0,0) )
    // the v4.0.x export/import API used below was changed in v5.0.0
    printf ("v5.0.0 not supported\n") ;
    return (GrB_PANIC) ;
#else

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Vector q = NULL ;           // nodes visited at each level
    GrB_Vector v = NULL ;           // result vector
    GrB_Vector t = NULL ;           // temporary vector
    GrB_Vector pi = NULL ;          // parent vector

    if (v_output == NULL || (A == NULL && AT == NULL))
    {
        // required output argument is missing
        LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
    }

    (*v_output) = NULL ;
    bool compute_tree = (pi_output != NULL) ;

    bool use_vxm_with_A ;
    GrB_Index nrows, ncols, nvalA, ignore, nvals ;
    if (A == NULL)
    {
        // only AT is provided
        LAGr_Matrix_ncols (&nrows, AT) ;
        LAGr_Matrix_nrows (&ncols, AT) ;
        LAGr_Matrix_nvals (&nvalA, AT) ;
        use_vxm_with_A = false ;
    }
    else
    {
        // A is provided.  AT may or may not be provided
        LAGr_Matrix_nrows (&nrows, A) ;
        LAGr_Matrix_ncols (&ncols, A) ;
        LAGr_Matrix_nvals (&nvalA, A) ;
        use_vxm_with_A = true ;
    }

    // push/pull requires both A and AT
    bool push_pull = (A != NULL && AT != NULL) ;

    if (nrows != ncols)
    {
        // A must be square
        LAGRAPH_ERROR ("A must be square", GrB_NULL_POINTER) ;
    }

    //--------------------------------------------------------------------------
    // check the format of A and AT
    //--------------------------------------------------------------------------

    bool csr = true ;

    // csr is true if A and AT are known (or assumed) to be in CSR format; if
    // false, they are known to be in CSC format.

    // This can be tested in SuiteSparse:GraphBLAS.  Other libraries can use
    // this section for their own library-specific tests, if they have them.

    // LAGraph_bfs_pushpull will work just fine if nothing is changed or if the
    // following is disabled (even SuiteSparse:GraphBLAS).  The push/pull
    // behaviour will be unpredictable, however, unless the library default
    // format is CSR.

    #ifdef GxB_SUITESPARSE_GRAPHBLAS
    // The CSR vs CSC status can be tested in SuiteSparse:GraphBLAS.
    // However, even with SuiteSparse:GraphBLAS, this step is optional.
    GxB_Format_Value A_format = -1, AT_format = -1 ;
    bool A_csr = true, AT_csr = true ;
    if (A != NULL)
    {
        // A_csr is true if accessing A(i,:) is fast
        LAGr_get (A , GxB_FORMAT, &A_format) ;
        A_csr = (A_format == GxB_BY_ROW) ;
    }
    if (AT != NULL)
    {
        // AT_csr is true if accessing AT(i,:) is fast
        LAGr_get (AT, GxB_FORMAT, &AT_format) ;
        AT_csr = (AT_format == GxB_BY_ROW) ;
    }
    // Assume CSR if A(i,:) and AT(i,:) are both fast.  If csr is false,
    // then the algorithm below will reverse the use of vxm and mxv.
    csr = A_csr && AT_csr ;
    if (push_pull)
    {
        // both A and AT are provided.  Require they have the same format.
        // Either both A(i,:) and AT(i,:) are efficient to access, or both
        // A(:,j) and AT(:,j) are efficient to access.
        if (A_csr != AT_csr)
        {
            LAGRAPH_ERROR ("A and AT must in the same format:\n"
                "both GxB_BY_ROW, or both GxB_BY_COL",
                GrB_INVALID_VALUE) ;
        }
    }
    else
    {
        // only A or AT are provided.  Refuse to do the pull-only version.
        if (A != NULL && A_format == GxB_BY_COL)
        {
            // this would result in a pull-only BFS ... exceedingly slow
            LAGRAPH_ERROR (
                "SuiteSparse: AT not provided, so A must be GxB_BY_ROW\n"
                "(or provide both A and AT, both in the same format,\n"
                "either both GxB_BY_COL or both GxB_BY_ROW)",
                GrB_INVALID_VALUE) ;
        }
        if (AT != NULL && AT_format == GxB_BY_ROW)
        {
            // this would result in a pull-only BFS ... exceedingly slow
            LAGRAPH_ERROR (
                "SuiteSparse: A not provided, so AT must be GxB_BY_COL\n"
                "(or provide both A and AT, both in the same format,\n"
                "either both GxB_BY_COL or both GxB_BY_ROW)",
                GrB_INVALID_VALUE) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    GrB_Index n = nrows ;
    int nthreads = LAGraph_get_nthreads ( ) ;
    // cap the thread count so each thread has at least ~4096 nodes of work
    nthreads = LAGRAPH_MIN (n / 4096, nthreads) ;
    nthreads = LAGRAPH_MAX (nthreads, 1) ;

    // just traverse from the source node
    max_level = (max_level <= 0) ? n : LAGRAPH_MIN (n, max_level) ;

    // create an empty vector v
    GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
    LAGr_Vector_new (&v, int_type, n) ;

    // make v dense if requested
    int64_t vlimit = LAGRAPH_MAX (256, sqrt ((double) n)) ;
    if (!vsparse)
    {
        // v is expected to have many entries, so convert v to dense.
        // If the guess is wrong, v can be made dense later on.
        LAGr_assign (v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
    }

    GrB_Semiring first_semiring, second_semiring ;
    if (compute_tree)
    {
        // create an integer vector q, and set q(source) to source+1
        LAGr_Vector_new (&q, int_type, n) ;
        LAGr_Vector_setElement (q, source+1, source) ;

        if (n > INT32_MAX)
        {
            #if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
             && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
            // terminates as soon as it finds any parent; nondeterministic
            first_semiring  = GxB_ANY_FIRST_INT64 ;
            second_semiring = GxB_ANY_SECOND_INT64 ;
            #else
            // deterministic, but cannot terminate early
            first_semiring  = LAGraph_MIN_FIRST_INT64 ;
            second_semiring = LAGraph_MIN_SECOND_INT64 ;
            #endif
        }
        else
        {
            #if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
             && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
            // terminates as soon as it finds any parent; nondeterministic
            first_semiring  = GxB_ANY_FIRST_INT32 ;
            second_semiring = GxB_ANY_SECOND_INT32 ;
            #else
            // deterministic, but cannot terminate early
            first_semiring  = LAGraph_MIN_FIRST_INT32 ;
            second_semiring = LAGraph_MIN_SECOND_INT32 ;
            #endif
        }

        // create the empty parent vector
        LAGr_Vector_new (&pi, int_type, n) ;
        if (!vsparse)
        {
            // make pi a dense vector of all zeros
            LAGr_assign (pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
        }
        // pi (source) = source+1 denotes a root of the BFS tree
        LAGr_Vector_setElement (pi, source+1, source) ;
    }
    else
    {
        // create a boolean vector q, and set q(source) to true
        LAGr_Vector_new (&q, GrB_BOOL, n) ;
        LAGr_Vector_setElement (q, true, source) ;

        #if defined ( GxB_SUITESPARSE_GRAPHBLAS ) \
         && ( GxB_IMPLEMENTATION >= GxB_VERSION (3,2,0) )
        // terminates as soon as it finds any pair
        first_semiring  = GxB_ANY_PAIR_BOOL ;
        second_semiring = GxB_ANY_PAIR_BOOL ;
        #else
        // can terminate early, but requires more data movement internally
        first_semiring  = LAGraph_LOR_FIRST_BOOL ;
        second_semiring = LAGraph_LOR_SECOND_BOOL ;
        #endif
    }

    // average node degree
    double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;

    int64_t nvisited = 0 ;      // # nodes visited so far
    GrB_Index nq = 1 ;          // number of nodes in the current level

    //--------------------------------------------------------------------------
    // BFS traversal and label the nodes
    //--------------------------------------------------------------------------

    for (int64_t level = 1 ; ; level++)
    {

        //----------------------------------------------------------------------
        // set v to the current level, for all nodes in q
        //----------------------------------------------------------------------

        // v<q> = level: set v(i) = level for all nodes i in q
        LAGr_assign (v, q, NULL, level, GrB_ALL, n, GrB_DESC_S) ;

        //----------------------------------------------------------------------
        // check if done
        //----------------------------------------------------------------------

        nvisited += nq ;
        if (nq == 0 || nvisited == n || level >= max_level) break ;

        //----------------------------------------------------------------------
        // check if v should be converted to dense
        //----------------------------------------------------------------------

        if (vsparse && nvisited > vlimit)
        {
            // Convert v from sparse to dense to speed up the rest of the work.
            // If this case is triggered, it would have been faster to pass in
            // vsparse = false on input.
            // v <!v> = 0
            LAGr_assign (v, v, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
            LAGr_Vector_nvals (&ignore, v) ;
            if (compute_tree)
            {
                // Convert pi from sparse to dense, to speed up the work.
                // pi<!pi> = 0
                LAGr_assign (pi, pi, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
                LAGr_Vector_nvals (&ignore, pi) ;
            }
            vsparse = false ;
        }

        //----------------------------------------------------------------------
        // select push vs pull
        //----------------------------------------------------------------------

        // The normal heuristic is disabled in this benchmarking variant:
        // the cost estimates are still computed (and could be logged) but
        // both directions are always executed below.
        // if (push_pull)
        // {
            double pushwork = d * nq ;
            double expected = (double) n / (double) (nvisited+1) ;
            double per_dot = LAGRAPH_MIN (d, expected) ;
            double binarysearch = (3 * (1 + log2 ((double) nq))) ;
            double pullwork = (n-nvisited) * per_dot * binarysearch ;
        //     use_vxm_with_A = (pushwork < pullwork) ;
        //     if (!csr)
        //     {
        //         // Neither A(i,:) nor AT(i,:) is efficient.  Instead, both
        //         // A(:,j) and AT(:,j) is fast (that is, the two matrices
        //         // are in CSC format).  Swap the
        //         use_vxm_with_A = !use_vxm_with_A ;
        //     }
        // }

        //----------------------------------------------------------------------
        // q = next level of the BFS
        //----------------------------------------------------------------------

        // DO BOTH PUSH AND PULL and log the timings.

        double tic [2] ;
        LAGraph_tic (tic) ;
        {
            // q<!v> = AT*q
            // this is a pull step if AT is in CSR format; push if CSC
            // the result q2 is timed and discarded; only the vxm below
            // advances the BFS
            GrB_Vector q2 ;
            LAGr_Vector_new (&q2, compute_tree ? int_type : GrB_BOOL, n) ;
            LAGr_mxv (q2, v, NULL, second_semiring, AT, q, GrB_DESC_RC) ;
            LAGr_free (&q2) ;
        }
        double t_pull = LAGraph_toc (tic) ;

        LAGraph_tic (tic) ;
        {
            // q'<!v> = q'*A
            // this is a push step if A is in CSR format; pull if CSC
            LAGr_vxm (q, v, NULL, first_semiring, q, A, GrB_DESC_RC) ;
        }
        double t_push = LAGraph_toc (tic) ;

        // log the timings
        fprintf (logfile, "%g %g %g %g\n",
            (double) nq, (double) nvisited, t_pull, t_push) ;
        fflush (logfile) ;

        //----------------------------------------------------------------------
        // move to next level
        //----------------------------------------------------------------------

        if (compute_tree)
        {

            //------------------------------------------------------------------
            // assign parents
            //------------------------------------------------------------------

            // q(i) currently contains the parent of node i in tree (off by one
            // so it won't have any zero values, for valued mask).
            // pi<q> = q
            LAGr_assign (pi, q, NULL, q, GrB_ALL, n, GrB_DESC_S) ;

            //------------------------------------------------------------------
            // replace q with current node numbers
            //------------------------------------------------------------------

            // TODO this could be a unaryop
            // q(i) = i+1 for all entries in q.
            // Done via export/import: pull out the pattern (qi) and values
            // (qx), overwrite qx[k] = qi[k]+1 in parallel, and put the
            // vector back.  The export/import API differs per GxB version.
            #ifdef GxB_SUITESPARSE_GRAPHBLAS
            GrB_Index *qi ;
            GrB_Index q_size ;
            GrB_Index qi_size, qx_size ;
            bool jumbled ;
            if (n > INT32_MAX)
            {
                int64_t *qx ;
                #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
                LAGr_Vector_export_CSC (&q, &int_type, &n,
                    &qi, (void **) (&qx), &qi_size, &qx_size, &nq,
                    &jumbled, NULL) ;
                #elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
                LAGr_Vector_export_CSC (&q, &int_type, &n, &q_size, &nq,
                    &jumbled, &qi, (void **) (&qx), NULL) ;
                #else
                LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
                    (void **) (&qx), NULL) ;
                #endif
                int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
                nth = LAGRAPH_MAX (nth, 1) ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (int64_t k = 0 ; k < nq ; k++)
                {
                    qx [k] = qi [k] + 1 ;
                }
                #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
                LAGr_Vector_import_CSC (&q, int_type, n,
                    &qi, (void **) (&qx), qi_size, qx_size, nq,
                    jumbled, NULL) ;
                #elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
                LAGr_Vector_import_CSC (&q, int_type, n, q_size, nq,
                    jumbled, &qi, (void **) (&qx), NULL) ;
                #else
                LAGr_Vector_import (&q, int_type, n, nq, &qi,
                    (void **) (&qx), NULL) ;
                #endif
            }
            else
            {
                int32_t *qx ;
                #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
                LAGr_Vector_export_CSC (&q, &int_type, &n,
                    &qi, (void **) (&qx), &qi_size, &qx_size, &nq,
                    &jumbled, NULL) ;
                #elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
                LAGr_Vector_export_CSC (&q, &int_type, &n, &q_size, &nq,
                    &jumbled, &qi, (void **) (&qx), NULL) ;
                #else
                LAGr_Vector_export (&q, &int_type, &n, &nq, &qi,
                    (void **) (&qx), NULL) ;
                #endif
                int nth = LAGRAPH_MIN (nq / (64*1024), nthreads) ;
                nth = LAGRAPH_MAX (nth, 1) ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (int32_t k = 0 ; k < nq ; k++)
                {
                    qx [k] = qi [k] + 1 ;
                }
                #if GxB_IMPLEMENTATION >= GxB_VERSION (4,0,1)
                LAGr_Vector_import_CSC (&q, int_type, n,
                    &qi, (void **) (&qx), qi_size, qx_size, nq,
                    jumbled, NULL) ;
                #elif GxB_IMPLEMENTATION == GxB_VERSION (4,0,0)
                LAGr_Vector_import_CSC (&q, int_type, n, q_size, nq,
                    jumbled, &qi, (void **) (&qx), NULL) ;
                #else
                LAGr_Vector_import (&q, int_type, n, nq, &qi,
                    (void **) (&qx), NULL) ;
                #endif
            }
            #else
            // TODO: use extractTuples and build instead
            // Or use something like:
            // extract tuples into I
            // let e = 1:n be created once, in initialization phase
            // q<q> = e (I)
            fprintf (stderr, "TODO: use extractTuples here\n") ;
            abort ( ) ;
            #endif
        }
        else
        {

            //------------------------------------------------------------------
            // count the nodes in the current level
            //------------------------------------------------------------------

            LAGr_Vector_nvals (&nq, q) ;
        }
    }

    //--------------------------------------------------------------------------
    // return the parent vector, if computed
    //--------------------------------------------------------------------------

    if (compute_tree)
    {
        (*pi_output) = pi ;
        pi = NULL ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    (*v_output) = v ;       // return result
    v = NULL ;              // set to NULL so LAGRAPH_FREE_ALL doesn't free it
    LAGRAPH_FREE_ALL ;      // free all workspace (except for result v)
    return (GrB_SUCCESS) ;
#endif
}
|
fornali.c | /*
----------------------------------------------------
-- Parallel maximal sub-sequence project --
Author: FORNALI Damien
Grade: Master I - IFI
University: Nice-Sophia-Antipolis
Year: 2017-2018
Project subject link: https://sites.google.com/site/fabricehuet/teaching/parallelisme-et-distribution/sous-sequence-maximale
----------------------------------------------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
/* - Structures - */

/* A heap-allocated sequence of 64-bit signed integers together with its
   element count.  'data' owns the buffer (see allocateArray/destroy);
   organise()/restore() temporarily shift the base pointer. */
struct array {
    long long* data;            // buffer of 'size' elements
    unsigned long long size;    // number of elements in 'data'
};
/*
-- Declarations --
*/
/* - Core - */
void ascent(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long), long long bInit);
void pre_downhill(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long), long long bInit);
void suf_downhill(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long), long long bInit);
void ultimate(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long));
struct array* prefix(struct array* src, unsigned long long size, long long (*ibinary_fun)(long long, long long), long long bInit, unsigned long long organiseOffset);
struct array* suffix(struct array* src, unsigned long long size, long long (*ibinary_fun)(long long, long long), long long bInit, unsigned long long organiseOffset);
struct array* computeM(struct array* src, struct array* psum, struct array* ssum, struct array* smax, struct array* pmax);
long long findMax(struct array* M);
void findMSS(struct array* src, struct array* M);
void computeMSS(const char* file_name);
/* - Tools - */
struct array* parse(const char* file_name);
struct array* allocateArray(unsigned long long size);
long long plus(long long left, long long right);
long long max(long long left, long long right);
int isPow2(unsigned long long value);
void organise(struct array* src, unsigned long long offset);
void restore(struct array* src, unsigned long long noffset);
void destroy(struct array* arr);
void printSolution(long long max, struct array* src, unsigned long long startIndex, unsigned long long endIndex);
void lightPrintSolution(long long max);
/*
-- Definitions --
*/
/*
- Core -
*/
/**
    Classic prefix / suffix ascent phase (bottom-up tree pass).
    a: the source array (the leaves)
    b: the destination array; must hold 2 * a->size elements
    ibinary_fun: the binary function combining two children into a parent
    bInit: the destination is pre-filled with this value
    - [Parallel] -
    Fixes:
    - internal shifts use 1ULL so they cannot overflow the 'int' literal
      when a->size >= 2^31;
    - the tree depth m is computed once; the old code evaluated
      log2(a->size) - 1 and converted it to unsigned, which is undefined
      behavior when a->size == 1 (log2 yields 0, so the value was -1.0).
*/
void ascent(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long), long long bInit) {
    // first, fill the lower half (internal nodes) with bInit
    #pragma omp parallel for
    for(unsigned long long i = 0; i < a->size; ++i)
        b->data[i] = bInit;
    // then copy the source content into the upper half (the leaves)
    #pragma omp parallel for
    for(unsigned long long i = a->size; i < b->size; ++i)
        b->data[i] = a->data[i - a->size];
    /* performs the tree ascent,
       l varies from (m - 1) down to 1 included */
    unsigned long long m = (unsigned long long) log2((double) a->size);
    for(unsigned long long l = m; l-- > 1; ){
        // j varies from 2^l to (2^(l + 1) - 1) included
        #pragma omp parallel for
        for(unsigned long long j = 1ULL << l; j < (1ULL << (l + 1)); ++j)
            b->data[j] = (*ibinary_fun)(b->data[j << 1], b->data[(j << 1) + 1]);
    }
}
/**
    Prefix algorithm downhill phase (top-down tree pass).
    a: the source array (the ascent result)
    b: the destination array
    ibinary_fun: the binary function combining a parent with a left sibling
    bInit: the destination is pre-filled with this value
    e.g. of result tree:
                 0
             0       15
           0  10   15  16
    - [Parallel] -
    Fixes: shifts use 1ULL (no 'int' overflow for a->size >= 2^31) and the
    floating-point log2 bound is hoisted out of the loop condition.
*/
void pre_downhill(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long), long long bInit) {
    // fill the destination with bInit
    #pragma omp parallel for
    for(unsigned long long i = 0; i < b->size; ++i)
        b->data[i] = bInit;
    /* performs the tree downhill
       l varies from 1 to (m - 1) included */
    unsigned long long m = (unsigned long long) log2((double) a->size);
    for(unsigned long long l = 1; l < m; ++l){
        // j varies from 2^l to (2^(l + 1) - 1) included
        #pragma omp parallel for
        for(unsigned long long j = 1ULL << l; j < (1ULL << (l + 1)); ++j){
            if(j % 2 == 0)
                b->data[j] = b->data[j >> 1];       // left child inherits the parent
            else
                b->data[j] = (*ibinary_fun)(b->data[j >> 1], a->data[j - 1]);
        }
    }
}
/**
    Suffix algorithm downhill phase.
    The algorithm is similar to the prefix one but the resulting tree
    is the prefix's one mirrored about the central axis.
    a: the source array (the ascent result)
    b: the destination array
    ibinary_fun: the binary function combining a parent with a right sibling
    bInit: the destination is pre-filled with this value
    e.g. of result tree:
                 0
            15       0
          16  15   10  0
    - [Parallel] -
    Fixes: shifts use 1ULL (no 'int' overflow for a->size >= 2^31) and the
    floating-point log2 bound is hoisted out of the loop condition.
*/
void suf_downhill(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long), long long bInit) {
    // fill the destination with bInit
    #pragma omp parallel for
    for(unsigned long long i = 0; i < b->size; ++i)
        b->data[i] = bInit;
    /* performs the tree downhill
       l varies from 1 to (m - 1) included */
    unsigned long long m = (unsigned long long) log2((double) a->size);
    for(unsigned long long l = 1; l < m; ++l){
        // j varies from 2^l to (2^(l + 1) - 1) included
        #pragma omp parallel for
        for(unsigned long long j = 1ULL << l; j < (1ULL << (l + 1)); ++j){
            // odd part: right child inherits the parent
            if(j % 2 == 1)
                b->data[j] = b->data[j >> 1];
            // even part: combine the parent with the right neighbour leaf
            else
                b->data[j] = (*ibinary_fun)(b->data[j >> 1], a->data[j + 1]);
        }
    }
}
/**
    Classic prefix / suffix ultimate phase: combine each leaf of the
    ascent tree 'a' into the downhill tree 'b'.
    a: the source array
    b: the destination array
    ibinary_fun: the binary function applied element-wise on the leaves
    - [Parallel] -
    Fixes: shifts use 1ULL (no 'int' overflow for large m) and m == 0 is
    guarded -- the old code shifted by (m - 1) which wraps to a huge shift
    count (undefined behavior) when a->size == 1.
*/
void ultimate(struct array* a, struct array* b, long long (*ibinary_fun)(long long, long long)) {
    unsigned long long m = (unsigned long long) log2((double) a->size);
    if(m == 0)
        return;     // a single-leaf tree has no leaf range [2^(m-1), 2^m)
    // i varies from 2^(m - 1) to (2^m) - 1 included
    #pragma omp parallel for
    for(unsigned long long i = 1ULL << (m - 1); i < (1ULL << m); ++i)
        b->data[i] = (*ibinary_fun)(b->data[i], a->data[i]);
}
/**
    The prefix algorithm: ascent, prefix-specific downhill, ultimate.
    src: the prefix source
    size: the new allocations size (twice the source size)
    ibinary_fun: the binary function used in the three phases
    bInit: value used to initially fill the work arrays
    organiseOffset: offset applied to the result before returning, so the
                    caller sees only the meaningful half of the tree
    - [Parallel] -
*/
inline struct array* prefix(struct array* src, unsigned long long size, long long (*ibinary_fun)(long long, long long),
                            long long bInit, unsigned long long organiseOffset){
    // phase 1: bottom-up pass from 'src' into the workspace tree
    struct array* tree = allocateArray(size);
    ascent(src, tree, ibinary_fun, bInit);
    // phase 2: top-down, prefix-specific pass into the result
    struct array* result = allocateArray(size);
    pre_downhill(tree, result, ibinary_fun, bInit);
    // phase 3: fold the leaves of 'tree' into 'result'
    ultimate(tree, result, ibinary_fun);
    // the workspace is no longer needed
    destroy(tree);
    // shift the base pointer so only the useful half is visible
    organise(result, organiseOffset);
    return result;
}
/**
    The suffix algorithm: ascent, suffix-specific downhill, ultimate.
    src: the suffix source
    size: the new allocations size (twice the source size)
    ibinary_fun: the binary function used in the three phases
    bInit: value used to initially fill the work arrays
    organiseOffset: offset applied to the result before returning, so the
                    caller sees only the meaningful half of the tree
    - [Parallel] -
*/
inline struct array* suffix(struct array* src, unsigned long long size, long long (*ibinary_fun)(long long, long long),
                            long long bInit, unsigned long long organiseOffset){
    // phase 1: bottom-up pass from 'src' into the workspace tree
    struct array* tree = allocateArray(size);
    ascent(src, tree, ibinary_fun, bInit);
    // phase 2: top-down, suffix-specific pass into the result
    struct array* result = allocateArray(size);
    suf_downhill(tree, result, ibinary_fun, bInit);
    // phase 3: fold the leaves of 'tree' into 'result'
    ultimate(tree, result, ibinary_fun);
    // the workspace is no longer needed
    destroy(tree);
    // shift the base pointer so only the useful half is visible
    organise(result, organiseOffset);
    return result;
}
/**
    The MSS algorithm fifth phase.
    Assumes that the inputs have been re-organised, i.e. all have the same
    size as src.
    src: the MSS algorithm input data
    psum: the src's sum-prefix
    ssum: the src's sum-suffix
    smax: the psum's max-suffix
    pmax: the ssum's max-prefix
    - [Parallel] -
*/
inline struct array* computeM(struct array* src, struct array* psum, struct array* ssum,
                              struct array* smax, struct array* pmax){
    // the resulting M array
    struct array* M = allocateArray(src->size);
    #pragma omp parallel for
    for(unsigned long long k = 0; k < src->size; ++k){
        // best sum of a segment ending at k, best sum of a segment
        // starting at k, with src->data[k] counted exactly once
        long long endingHere   = pmax->data[k] - ssum->data[k];
        long long startingHere = smax->data[k] - psum->data[k];
        M->data[k] = endingHere + startingHere + src->data[k];
    }
    return M;
}
/**
    The MSS algorithm sixth phase.
    Finds the maximum value in the inquired array.
    Uses an omp parallel reduction.
    src: the source array
    Fixes:
    - the user-declared reduction now carries an initializer clause;
      without one the private copies started at an unspecified value
      (0 in practice), which made the reduction return 0 for all-negative
      input -- the old code papered over this with a sequential re-scan.
    - the identity is LLONG_MIN, not LONG_MIN: the data is 'long long'
      and 'long' is only 32 bits on some ABIs (e.g. 64-bit Windows).
    - [Parallel] -
*/
#pragma omp declare reduction(maximum : long long : \
        omp_out = omp_in > omp_out ? omp_in : omp_out) \
        initializer(omp_priv = LLONG_MIN)
long long findMax(struct array* src){
    long long best = LLONG_MIN;
    #pragma omp parallel for reduction(maximum:best)
    for(unsigned long long i = 0; i < src->size; ++i){
        if(src->data[i] > best)
            best = src->data[i];
    }
    return best;
}
/**
    The last MSS algorithm phase.
    Finds the maximal sub-sequence in src according to the M array and
    prints it.
    src: the MSS algorithm input data
    M: the sixth phase result
    Fix: the index-search loop used to run under
    '#pragma omp parallel for' while every iteration read AND wrote the
    shared variables startIndex/endIndex without any synchronization --
    a data race (undefined behavior) that could also latch indices from
    the wrong occurrence of the maximum.  The scan is a single cheap pass
    over M, so it is now sequential and deterministic.
*/
void findMSS(struct array* src, struct array* M){
    // get the maximum value of M
    long long maxValue = findMax(M);
    // the two indices used to locate the MSS (-1 = unset)
    long long startIndex = -1;
    long long endIndex = -1;
    if(src->size < 1){
        printf("Unknown source data.\n");
        return;
    }
    // handle the alone-element case
    if(src->size == 1){
        startIndex = 0;
        endIndex = 0;
    } else if(src->size == 2){
        // handle all possibilities of the two-elements case
        if(src->data[0] < 0){
            if(src->data[1] < 0){
                // both negative: keep the larger single element
                startIndex = (src->data[0] < src->data[1]) ? 1 : 0;
                endIndex = startIndex;
            } else {
                startIndex = 1;
                endIndex = 1;
            }
        } else {
            if(src->data[1] < 0){
                startIndex = 0;
                endIndex = 0;
            } else {
                startIndex = 0;
                endIndex = 1;
            }
        }
    } else {
        /* handles the n elements
           where n > 2 and a power of two */
        // if the first element is the beginning of the sequence
        if(M->data[0] == maxValue)
            startIndex = 0;
        // if the last element is the end of the sequence
        if(M->data[M->size - 1] == maxValue)
            endIndex = M->size - 1;
        // if one of the indices is unset we iterate over M
        if(startIndex == -1 || endIndex == -1){
            // sequential on purpose: see the data-race note above
            for(unsigned long long i = 1; i < M->size - 1; ++i){
                // if the current M value is the maximum value
                if(M->data[i] == maxValue){
                    // if the startIndex is unset and
                    // the previous element is not the max value,
                    // it means we are at the beginning of the MSS
                    if(startIndex == -1 && M->data[i - 1] != maxValue)
                        startIndex = i;
                    // if the endIndex is unset and
                    // the next element is not the max value,
                    // it means we are at the end of the MSS
                    if(endIndex == -1 && M->data[i + 1] != maxValue)
                        endIndex = i;
                }
            }
        } else if(startIndex != -1 && endIndex != -1){
            /*
               - Sequential loop -
               Allows to handle the "tricky mirror" case.
               See archive-root/resources/trickyMirror/trickyMirrorTest.txt
            */
            long long sumCheck = 0;
            // updates the end index: stop as soon as the running sum
            // reaches the maximum
            for(unsigned long long i = startIndex; i <= endIndex; ++i){
                sumCheck += src->data[i];
                if(sumCheck == maxValue){
                    endIndex = i;
                    break;
                }
            }
        }
    }
    /* The following is for unhandled cases.
       For instance the case where the maximum value is only
       the source's first element:
       e.g. (src) 3 2 -7 11 */
    if(startIndex < 0 || endIndex < 0){
        // if the first element is equal to the max value
        if(M->data[0] == maxValue){
            startIndex = 0;
            endIndex = 0;
        } else if(M->data[M->size - 1] == maxValue){
            // if the last element is equal to the max value
            startIndex = M->size - 1;
            endIndex = startIndex;
        } else {
            printf("Error found while looking for a maximal sub-sequence.\n");
            return;
        }
    }
    // this is the final solution print !
    printSolution(maxValue, src, startIndex, endIndex);
    // lightPrintSolution(maxValue);
}
/**
-- The MSS algorithm entry point --
Computes the six phases, print the solution
and free the memory used.
file_name: the data source file name
- [Parallel] -
*/
inline void computeMSS(const char* file_name){
// get the source data
struct array* src = parse(file_name);
/*
The data file will be considered as always well formed, i.e. composed of a sequence of relative integers separated by spaces.
The table will be n = 2^m in size.
if(!isPow2(src->size)){
printf("Input file size must be a power of 2.\n");
return;
}
*/
/* Computes the prefixed sum (PSUM) - phase 1 */
struct array* prefixedSum = prefix(src, src->size << 1, &plus, 0, src->size);
/* Computes the suffixed sum (SSUM) - phase 2 */
struct array* suffixedSum = suffix(src, src->size << 1, &plus, 0, src->size);
/* Computes the suffixed max (SMAX) - phase 3 */
long long suffixedMaxOffset = prefixedSum->size;
struct array* suffixedMax = suffix(prefixedSum, prefixedSum->size << 1, &max, LONG_MIN, prefixedSum->size);
/* Computes the prefixed max (PMAX) - phase 4 */
long long prefixedMaxOffset = suffixedSum->size;
struct array* prefixedMax = prefix(suffixedSum, suffixedSum->size << 1, &max, LONG_MIN, suffixedSum->size);
/* Computes the M array - phase 5 */
struct array* M = computeM(src, prefixedSum, suffixedSum, suffixedMax, prefixedMax);
/* Finds the maximal subsequence sum - last phases (also prints the solution) */
findMSS(src, M);
// restore before destruction
restore(prefixedSum, src->size);
restore(suffixedSum, src->size);
restore(suffixedMax, suffixedMaxOffset);
restore(prefixedMax, prefixedMaxOffset);
// free the memory
destroy(src);
destroy(prefixedSum);
destroy(suffixedSum);
destroy(suffixedMax);
destroy(prefixedMax);
destroy(M);
}
/*
    - Tools -
*/
/**
    Parses a file and produces an array from the integer data collected.
    file_name: the data source file name
    Returns a freshly allocated struct array owned by the caller.
    Fixes: the fopen result was dereferenced without a check (NULL
    dereference when the file does not exist or is unreadable) and the
    second-pass fscanf return value was ignored.
*/
struct array* parse(const char* file_name){
    FILE* file = fopen(file_name, "r");
    if(file == NULL){
        fprintf(stderr, "Cannot open input file '%s'.\n", file_name);
        exit(EXIT_FAILURE);
    }
    long long size = 0;
    long long value = 0;
    // first pass: count the values to size the array
    while(fscanf(file, "%lld", &value) == 1)
        size++;
    struct array* src = allocateArray(size);
    rewind(file);
    // second pass: load the values (stops early if the file changed)
    long long ptr = 0;
    while(ptr < size && fscanf(file, "%lld", &value) == 1)
        src->data[ptr++] = value;
    fclose(file);
    return src;
}
/**
    Allocates a struct array.
    size: the number of elements of the generated array
    Fixes: both malloc results are now checked -- on out-of-memory the
    program prints a diagnostic and exits instead of crashing later on a
    NULL dereference.  (malloc(0) may legitimately return NULL, hence the
    size > 0 condition on the data check.)
*/
struct array* allocateArray(unsigned long long size) {
    struct array* tmp = malloc(sizeof *tmp);
    if(tmp == NULL){
        fprintf(stderr, "Out of memory.\n");
        exit(EXIT_FAILURE);
    }
    tmp->size = size;
    tmp->data = malloc(size * sizeof *tmp->data);
    if(tmp->data == NULL && size > 0){
        fprintf(stderr, "Out of memory.\n");
        exit(EXIT_FAILURE);
    }
    return tmp;
}
/** The plus binary function: returns left + right.
    (The extern 'inline' specifier is dropped: under C99/C11 semantics an
    inline definition alone provides no external definition, which can
    cause undefined references at link time; the optimizer inlines small
    functions regardless.) */
long long plus(long long left, long long right){
    return left + right;
}
/** The max binary function: returns the larger of the two operands.
    (The extern 'inline' specifier is dropped: under C99/C11 semantics an
    inline definition alone provides no external definition, which can
    cause undefined references at link time.) */
long long max(long long left, long long right){
    return (left > right) ? left : right;
}
/** Is 'value' a (nonzero) power of two ? (to check input data)
    Fixed: the old ceil(log2(v)) == floor(log2(v)) test relies on
    floating-point precision (wrong answers possible above 2^53) and
    accepted 0.  An exact bit test is used instead: a power of two has
    exactly one bit set. */
int isPow2(unsigned long long value){
    return value != 0 && (value & (value - 1)) == 0;
}
/**
    Moves forward the array base address by 'offset' elements and shrinks
    the logical size accordingly.  After this call src->data no longer
    points at the start of the malloc'd block, so restore() MUST be called
    with the same offset before destroy() is allowed to free it.
    src: the array
    offset: the offset value (in elements)
*/
inline void organise(struct array* src, unsigned long long offset){
    src->data += offset;
    src->size -= offset;
}
/**
    Moves backward the array base address by 'noffset' elements and grows
    the logical size accordingly -- the exact inverse of organise(), used
    to hand the original malloc'd pointer back to destroy().
    src: the array
    noffset: the offset value (in elements), same as passed to organise()
*/
inline void restore(struct array* src, unsigned long long noffset){
    src->data -= noffset;
    src->size += noffset;
}
/** Frees a struct array (buffer, then descriptor).  NULL is accepted and
    ignored, matching free()'s own contract. */
void destroy(struct array* arr){
    if(arr == NULL)
        return;
    free(arr->data);
    free(arr);
}
/**
    Prints the final solution: the max value followed by the MSS elements,
    space separated and newline terminated.
    (May produce a segmentation fault with huge arrays, with such, use the
    alternative 'lightPrintSolution' function ?)
    max: the MSS algorithm max value computed
    src: the MSS algorithm input data
    startIndex: the start of the MSS (inclusive)
    endIndex: the end of the MSS (inclusive)
*/
inline void printSolution(long long max, struct array* src, unsigned long long startIndex, unsigned long long endIndex){
    printf("%lld", max);
    for(unsigned long long i = startIndex; i <= endIndex; ++i)
        printf(" %lld", src->data[i]);
    printf("\n");
}
/** Alternative, prints only the inquired maximum value. */
inline void lightPrintSolution(long long max){
    fprintf(stdout, "%lld\n", max);
}
/* - The MSS algorithm entry point - */
int main(int argc, char** argv){
    // the input data file is passed as the single command-line argument
    if(argc == 2){
        computeMSS(argv[1]);
        return 0;
    }
    printf("Wrong command arguments.\n");
    return -1;
}
kCDensestSamplingKclistpp.c | /*
Info:
This program corresponds to "Seq-Sampling++" in the PVLDB 2020 paper.
Feel free to use these lines as you wish.
This program iterates over all k-cliques, randomly saves a small part of them
in the main memory, iterates over these sampled k-cliques for many rounds and
report the approximate maximum k-clique density.
Note that this program can only handle k >= 3, i.e., k = 2 is not supported.
To compile:
"gcc kCDensestSamplingKclistpp.c BinaryHeap.c Graph.c -O3 -o kCDensestSamplingKclistpp -lm -fopenmp"
To execute:
"./kCDensestSamplingKclistpp p T k edgeListFileName"
p is the number of threads.
T is the number of iterations of the "++" operation (will be rounded down to
the nearest power of 2).
k is the size of a clique considered as in "k-clique". It must be at least 3.
edgeListFileName is the name of the file that contains the graph. Each line of
the file contains one edge represented by two integers separated by a space.
Output:
Evolution of the approximate k-clique densest subgraph. One record per line,
containing
- the number of nodes in the approximate k-clique densest subgraph;
- the number of edges in the approximate k-clique densest subgraph;
- the edge density of the approximate k-clique densest subgraph;
- the k-clique density of the approximate k-clique densest subgraph;
- the computed upper bound on the maximum k-clique density;
- the time elapsed since the beginning of the execution.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <limits.h>
#include "Graph.h"
/* qsort comparator: ascending order of unsigned values.
   Fixed: the old "(long long)a - b" difference was truncated to int on
   return, so a difference larger than INT_MAX could flip sign (e.g.
   4000000000u vs 0 compared as "less").  Two comparisons yield a safe
   -1/0/+1 result. */
static int UnsignedCmp(const void *a, const void *b) {
    unsigned x = *(const unsigned *)a;
    unsigned y = *(const unsigned *)b;
    return (x > y) - (x < y);
}
/* Returns a non-negative pseudo-random int.  On platforms where rand()
   only yields 15 bits (RAND_MAX == 0x7fff), two draws are combined into
   30 bits; otherwise rand() is returned as-is.
   Fixed: plain 'inline' with no extern declaration is an inline
   definition only under C99/C11 -- calls could produce an undefined
   reference at link time.  'static inline' gives the function a usable
   internal definition. */
static inline int LargeRand() {
    if (RAND_MAX == 0x7fff)
        return (rand() << 15) | rand();
    return rand();
}
/* Returns the maximum value LargeRand() can produce: 0x3fffffff when two
   15-bit rand() draws are combined, RAND_MAX otherwise.
   Fixed: plain 'inline' with no extern declaration is an inline
   definition only under C99/C11 -- calls could produce an undefined
   reference at link time.  'static inline' gives the function a usable
   internal definition. */
static inline int GetRandMax() {
    if (RAND_MAX == 0x7fff)
        return 0x3fffffff;
    return RAND_MAX;
}
// What a given k-clique enumeration call is being used for.
typedef enum {COUNT = 1, SAMPLING = 2, COUNT_IN_SUBGRAPH = 3} task_t;
// Per-thread node-id translation tables between the original graph (g)
// and the per-edge subgraph (sg): sg2g maps subgraph ids back to graph
// ids, g2sg the reverse.  ("to improve (???)" kept from the author.)
static unsigned *original_graph_id_sg2g = NULL, *original_graph_id_g2sg = NULL; // to improve (???)
#pragma omp threadprivate(original_graph_id_g2sg, original_graph_id_sg2g)
// Shared (non-threadprivate) translation tables used when re-counting
// inside the current densest subgraph.
unsigned *densest_subgraph_id_sg2g = NULL, *densest_subgraph_id_g2sg = NULL;
/* Allocate a Subgraph sized for neighborhoods of a graph whose core number is
   g->core.  Levels 1..k-1 of d[] and nodes[] get their own arrays; level 0 is
   never used by the enumeration. */
Subgraph *AllocSubgraph(Graph *g, unsigned char k) {
    Subgraph *sub = (Subgraph *)malloc(sizeof(Subgraph));
    unsigned core = g->core;

    sub->n = (unsigned *)calloc(k, sizeof(unsigned));
    sub->d = (unsigned **)malloc(k * sizeof(unsigned *));
    sub->adj = (unsigned *)malloc(core * core * sizeof(unsigned));
    sub->label = (unsigned char *)calloc(core, sizeof(unsigned char));
    sub->nodes = (unsigned **)malloc(k * sizeof(unsigned *));
    sub->core = core;

    for (unsigned level = 1; level < k; ++level) {
        sub->d[level] = (unsigned *)malloc(core * sizeof(unsigned));
        sub->nodes[level] = (unsigned *)malloc(core * sizeof(unsigned));
    }
    return sub;
}
/* Build in SG the subgraph induced by the common out-neighbors of edge (u, v).
   id_sg2g / id_g2sg translate between subgraph-local and global node ids.
   When passed as NULL they are allocated here; the (possibly new) pointers are
   published through the matching globals at the end so later calls reuse them. */
void MakeSubgraph(Graph *g, unsigned u, unsigned v, Subgraph *sg, unsigned char k, unsigned *id_sg2g, unsigned *id_g2sg, task_t task) {
    if (id_sg2g == NULL){
        /* First call: allocate the translation arrays.  UINT_MAX marks
           "not in the current subgraph". */
        id_g2sg = (unsigned *)malloc(g->n * sizeof(unsigned));
        id_sg2g = (unsigned *)malloc(g->core * sizeof(unsigned));
        for (unsigned i = 0; i < g->n; ++i) {
            id_g2sg[i] = UINT_MAX;
        }
    }
    for (unsigned i = 0; i < sg->n[k - 1]; ++i) {
        sg->label[i] = 0;
    }
    for (unsigned i = g->cd[v]; i < g->cd[v + 1]; ++i) { // Mark each out-neighbor of v with the sentinel UINT_MAX - 1
        id_g2sg[g->adj[i]] = UINT_MAX - 1;
    }
    unsigned j = 0;
    for (unsigned i = g->cd[u]; i < g->cd[u + 1]; ++i) { // Keep each out-neighbor of u that is also marked, i.e. common neighbors of (u, v)
        unsigned x = g->adj[i];
        if (id_g2sg[x] == UINT_MAX - 1) {
            id_g2sg[x] = j;
            id_sg2g[j] = x;
            sg->label[j] = k - 2;
            sg->nodes[k - 2][j] = j;
            sg->d[k - 2][j] = 0; // New degrees
            ++j;
        }
    }
    sg->n[k - 2] = j;
    for (unsigned i = 0; i < sg->n[k - 2]; ++i) { // Reorder adjacency list and compute new degrees
        unsigned x = id_sg2g[i];
        for (unsigned l = g->cd[x]; l < g->cd[x + 1]; ++l) {
            unsigned y = g->adj[l];
            j = id_g2sg[y];
            if (j < UINT_MAX - 1) {
                sg->adj[sg->core * i + sg->d[k - 2][i]++] = j;
            }
        }
    }
    for (unsigned i = g->cd[v]; i < g->cd[v + 1]; ++i) {
        id_g2sg[g->adj[i]] = -1; // -1 wraps to UINT_MAX: restores the "not in subgraph" marker
    }
    /* Publish the (possibly freshly allocated) translation arrays. */
    if (task == COUNT || task == SAMPLING) {
        original_graph_id_g2sg = id_g2sg;
        original_graph_id_sg2g = id_sg2g;
    } else {
        densest_subgraph_id_g2sg = id_g2sg;
        densest_subgraph_id_sg2g = id_sg2g;
    }
}
// ==========
// kCList: the clique-listing procedure
// ==========
unsigned CLIQUES_TO_SAMPLE = 10000000; // Target number of sampled cliques kept in main memory
unsigned sampled_cliques_reserved_size; // Maximum number of cliques for memory allocation; will increase if needed
unsigned *cknodes; // Nodes of a clique being formed.  NOTE(review): this global is malloc'd and written by every thread inside the parallel region without being private/threadprivate -- looks racy; confirm.
unsigned *ck; // List of all sampled cliques: cnt_sampled_clique records of k node ids each
unsigned *p_ckend; // Pointer to the end of ck[] (append position, advanced under omp critical)
unsigned long long cnt_clique; // Number of cliques
unsigned long long cnt_sampled_clique; // Number of sampled cliques
double sampling_prob; // Sampling probability (set from cnt_clique by KCLIST_CliqueEnum)
unsigned long long cnt_clique_in_densest_subgraph; // Number of cliques in the densest subgraph (without sampling)
/* Recursive kclist-style k-clique enumeration on the per-edge subgraph SG.
   clique_size: total clique size k being enumerated.
   l: current level (clique nodes still to choose); starts at k - 2 because two
      nodes come from the seeding edge.
   task: COUNT counts cliques, SAMPLING stores each clique into ck[] with
         probability sampling_prob, COUNT_IN_SUBGRAPH counts cliques of the
         extracted densest subgraph.
   The two leading branches (clique_size == 3 and l == 2) unroll the base
   cases of the recursion. */
void KCLIST_CliqueEnumThread(Subgraph *sg, unsigned char clique_size, unsigned char l, task_t task) {
    if (clique_size == 3) { /* Triangles: every remaining level-1 node closes a clique. */
        for (unsigned i = 0; i < sg->n[1]; ++i) {
            unsigned u = sg->nodes[1][i];
            if (task == SAMPLING)
                cknodes[0] = original_graph_id_sg2g[u]; // When task == COUNT_IN_SUBGRAPH, cknodes is useless
            switch (task) {
            case COUNT: {
                /* NOTE(review): under COUNT this runs inside an OpenMP parallel
                   region but cnt_clique is not in a reduction clause and not
                   otherwise protected -- looks like a data race; confirm. */
                ++cnt_clique;
                break;
            }
            case SAMPLING: {
                /* Keep this clique with probability sampling_prob.
                   NOTE(review): this base case uses >= while the l == 2 case
                   below uses > -- the two acceptance tests are inconsistent by
                   one unit of probability; confirm which is intended. */
                if (LargeRand() >= (GetRandMax() + 1LL) * sampling_prob)
                    break;
#pragma omp critical
                {
                    /* Grow ck[] if the reservation is exhausted.
                       NOTE(review): cnt_sampled_clique is in a reduction
                       clause, so this tests a thread-private copy rather than
                       the global total; the realloc threshold and the p_ckend
                       recomputation below look suspect -- confirm. */
                    if (cnt_sampled_clique >= sampled_cliques_reserved_size) {
                        sampled_cliques_reserved_size *= 2;
                        ck = (unsigned *)realloc(ck, sampled_cliques_reserved_size * clique_size * sizeof(unsigned));
                        p_ckend = ck + cnt_sampled_clique * clique_size;
                    }
                    for (unsigned j = 0; j < clique_size; ++j)
                        *(p_ckend++) = cknodes[j];
                    ++cnt_sampled_clique;
                }
                break;
            }
            case COUNT_IN_SUBGRAPH: {
                ++cnt_clique_in_densest_subgraph;
                break;
            }
            }
        }
        return;
    }
    if (l == 2) { /* Last two levels: each edge of the level-2 subgraph completes a clique. */
        for (unsigned i = 0; i < sg->n[2]; ++i) {
            unsigned u = sg->nodes[2][i];
            if (task == SAMPLING)
                cknodes[1] = original_graph_id_sg2g[u];
            for (unsigned j = u * sg->core, end = u * sg->core + sg->d[2][u]; j < end; ++j) {
                unsigned v = sg->adj[j];
                if (task == SAMPLING)
                    cknodes[0] = original_graph_id_sg2g[v];
                switch (task) {
                case COUNT: {
                    /* NOTE(review): same unprotected increment as above. */
                    ++cnt_clique;
                    break;
                }
                case SAMPLING: {
                    if (LargeRand() > (GetRandMax() + 1LL) * sampling_prob) // Store this clique with probability sampling_prob
                        break;
#pragma omp critical
                    {
                        if (cnt_sampled_clique >= sampled_cliques_reserved_size) {
                            sampled_cliques_reserved_size *= 2;
                            ck = (unsigned *)realloc(ck, sampled_cliques_reserved_size * clique_size * sizeof(unsigned));
                            p_ckend = ck + cnt_sampled_clique * clique_size;
                        }
                        for (unsigned k = 0; k < clique_size; ++k)
                            *(p_ckend++) = cknodes[k];
                        ++cnt_sampled_clique;
                    }
                    break;
                }
                case COUNT_IN_SUBGRAPH: {
                    ++cnt_clique_in_densest_subgraph;
                    break;
                }
                }
            }
        }
        return;
    }
    /* General level: for each candidate u, build the level-(l-1) subgraph of
       u's neighbors, recurse, then restore the labels. */
    for (unsigned i = 0; i < sg->n[l]; ++i) { // Enumerate in reverse order. Very confusing! "++i" is actually the reverse order.
        unsigned u = sg->nodes[l][i];
        if (task == SAMPLING)
            cknodes[l - 1] = original_graph_id_sg2g[u];
        sg->n[l - 1] = 0;
        unsigned end = u * sg->core + sg->d[l][u];
        for (unsigned j = u * sg->core; j < end; ++j) { // Relabel nodes and forming U'.
            unsigned v = sg->adj[j];
            if (sg->label[v] == l) {
                sg->label[v] = l - 1;
                sg->nodes[l - 1][sg->n[l - 1]++] = v;
                sg->d[l - 1][v] = 0; // New degrees
            }
        }
        for (unsigned j = 0; j < sg->n[l - 1]; ++j) { // Reorder adjacency list and compute new degrees
            unsigned v = sg->nodes[l - 1][j];
            for (unsigned k = sg->core * v, end = sg->core * v + sg->d[l][v]; k < end; ++k) {
                unsigned w = sg->adj[k];
                if (sg->label[w] == l - 1) {
                    ++sg->d[l - 1][v];
                }
                else{
                    /* Swap non-members to the tail so the kept neighbors form a compact prefix. */
                    sg->adj[k--] = sg->adj[--end];
                    sg->adj[end] = w;
                }
            }
            qsort(sg->adj + sg->core * v, sg->d[l - 1][v], sizeof(unsigned), UnsignedCmp); // Sort the nodes in reverse order
        }
        KCLIST_CliqueEnumThread(sg, clique_size, l - 1, task);
        for (unsigned j = 0; j < sg->n[l - 1]; ++j) { // Restore labels
            unsigned v = sg->nodes[l - 1][j];
            sg->label[v] = l;
        }
    }
}
/* Driver for one clique-enumeration pass over G.
   COUNT and SAMPLING iterate over all edges in parallel with one private
   Subgraph per thread; COUNT_IN_SUBGRAPH runs sequentially on the extracted
   densest subgraph.  A SAMPLING pass relies on cnt_clique computed by a
   preceding COUNT pass to fix sampling_prob. */
void KCLIST_CliqueEnum(Graph *g, unsigned char k, task_t task) {
    Subgraph *sg;
    switch (task) {
    case COUNT: {
        cnt_clique = 0;
        break;
    }
    case SAMPLING: {
        cnt_sampled_clique = 0;
        /* Reserve 10% headroom over the expected number of samples. */
        sampled_cliques_reserved_size = 1.1 * CLIQUES_TO_SAMPLE;
        sampling_prob = (CLIQUES_TO_SAMPLE < cnt_clique) ? (double)CLIQUES_TO_SAMPLE / cnt_clique : 1;
        p_ckend = ck = (unsigned *)malloc(sampled_cliques_reserved_size * k * sizeof(unsigned));
        break;
    }
    case COUNT_IN_SUBGRAPH: {
        cnt_clique_in_densest_subgraph = 0;
        break;
    }
    }
    if (task == COUNT || task == SAMPLING) {
        /* NOTE(review): under COUNT, cnt_clique is incremented by all threads
           but is absent from the reduction clause -- data race; confirm.
           Also, cknodes is a shared global assigned per-thread below (each
           thread mallocs and frees it) -- looks racy; confirm. */
#pragma omp parallel private(sg) reduction(+: cnt_sampled_clique)
        {
            cknodes = (unsigned *)malloc(k * sizeof(unsigned));
            sg = AllocSubgraph(g, k);
#pragma omp for schedule(dynamic, 1) nowait
            for(unsigned i = 0; i < g->e; ++i) {
                /* The seeding edge supplies clique members k-1 and k-2. */
                cknodes[k - 1] = g->edges[i].s;
                cknodes[k - 2] = g->edges[i].t;
                MakeSubgraph(g, g->edges[i].s, g->edges[i].t, sg, k, original_graph_id_sg2g, original_graph_id_g2sg, task);
                KCLIST_CliqueEnumThread(sg, k, k - 2, task);
            }
            free(cknodes);
            FreeSubgraph(sg, k);
        }
    } else {
        cknodes = (unsigned *)malloc(k * sizeof(unsigned));
        sg = AllocSubgraph(g, k);
        /* NULL makes MakeSubgraph allocate the densest-subgraph id maps on the
           first iteration; it publishes them back through the globals. */
        densest_subgraph_id_g2sg = densest_subgraph_id_sg2g = NULL;
        for (unsigned i = 0; i < g->e; ++i) {
            MakeSubgraph(g, g->edges[i].s, g->edges[i].t, sg, k, densest_subgraph_id_sg2g, densest_subgraph_id_g2sg, task);
            KCLIST_CliqueEnumThread(sg, k, k - 2, task);
        }
        free(densest_subgraph_id_g2sg);
        free(densest_subgraph_id_sg2g);
        free(cknodes);
        FreeSubgraph(sg, k);
    }
    switch (task) {
    case COUNT: {
        printf("Number of %u-cliques: %llu\n", k, cnt_clique);
        break;
    }
    case SAMPLING: {
        /* Shrink ck[] to the number of cliques actually sampled. */
        ck = (unsigned *)realloc(ck, cnt_sampled_clique * k * sizeof(unsigned));
        printf("Number of sampled %u-cliques: %llu\n", k, cnt_sampled_clique);
        break;
    }
    case COUNT_IN_SUBGRAPH: {
        // printf("Number of %u-cliques in the densest subgraph: %llu\n", k, cnt_clique_in_densest_subgraph);
        // printf("Density: %.12f\n", (double)cnt_clique_in_densest_subgraph / g->n);
        break;
    }
    }
}
unsigned *perm; // Random permutation of sampled-clique indices, reshuffled each Frank-Wolfe pass
unsigned *rho; // Frank-Wolfe weight accumulated on each node
unsigned *ordered_dec_rho; // Node ids sorted by decreasing rho value
unsigned *rank_of_rho; // rank_of_rho[v] = position of node v in ordered_dec_rho
unsigned *rho_pushed_to_max_rank; // Number of sampled cliques charged to each rank (see ExtractDensest)
bool *is_in_densest_subgraph; // Whether each node is in the densest subgraph
/* Summary of a candidate densest subset produced by ExtractDensest. */
typedef struct {
    unsigned n; // Number of nodes
    unsigned m; // Number of sampled cliques charged inside the subset (set from cnt_clique_in_subgraph in ExtractDensest)
    double density;
    double ub; // An upper bound of maximum density
} DensestSubsetInfo;
/* qsort comparator: orders node ids by decreasing rho value.
   Uses explicit comparisons instead of `rho[b] - rho[a]`: rho values are
   unsigned, so the old subtraction wrapped and was then truncated to int,
   which can report the wrong sign for large differences. */
static int NodeRhoValueCmp(const void *a, const void *b) {
    unsigned ra = rho[*(const unsigned *)a];
    unsigned rb = rho[*(const unsigned *)b];
    return (rb > ra) - (rb < ra);
}
/* One Frank-Wolfe pass over the sampled cliques: visit them in a fresh random
   order (incremental Fisher-Yates shuffle of perm[]) and give each clique one
   unit of weight on its member with the currently smallest rho value.
   g is unused; kept for signature symmetry with the other passes. */
void InMemoryFrankWolfe(Graph *g, const unsigned char k) {
    /* The index must be a signed 64-bit value: cnt_sampled_clique is an
       unsigned long long, and the previous `int` loop variable silently
       truncated counts above INT_MAX. */
    for (long long i = (long long)cnt_sampled_clique - 1; i >= 0; --i) {
        /* Shuffle: swap perm[i] with a uniformly chosen perm[0..i]. */
        long long id = LargeRand() % (i + 1);
        unsigned temp = perm[i];
        perm[i] = perm[id];
        perm[id] = temp;
        /* Sequential update: find the clique member with minimum rho and
           increment its weight. */
        unsigned long long base = (unsigned long long)perm[i] * k;
        unsigned node_getting_weight = ck[base];
        for (unsigned j = 1; j < k; ++j) {
            if (rho[ck[base + j]] < rho[node_getting_weight])
                node_getting_weight = ck[base + j];
        }
        ++rho[node_getting_weight];
    }
}
/* Extract a tentative densest subset from the current rho values and compute
   an upper bound on the maximum k-clique density.
   T is the number of Frank-Wolfe rounds completed (rho accumulates over
   rounds, hence the divisions by T).  Side effects: fills ordered_dec_rho,
   rank_of_rho, rho_pushed_to_max_rank and is_in_densest_subgraph. */
DensestSubsetInfo ExtractDensest(Graph *g, const unsigned char k, unsigned T) {
    // Sort the nodes in decreasing order of rho value
    DensestSubsetInfo info;
    for (unsigned i = 0; i < g->n; ++i) {
        ordered_dec_rho[i] = i;
        rho_pushed_to_max_rank[i] = 0;
    }
    qsort(ordered_dec_rho, g->n, sizeof(unsigned), NodeRhoValueCmp); // Reorder the nodes by decreasing rho values
    for (unsigned i = 0; i < g->n; ++i)
        rank_of_rho[ordered_dec_rho[i]] = i;
    /* Charge every sampled clique to its member of maximum rank, i.e. the
       member with the smallest rho value. */
    for (unsigned i = 0; i < cnt_sampled_clique; ++i) {
        unsigned node_getting_weight = ck[i * k];
        for (unsigned j = 1; j < k; ++j) {
            if (rank_of_rho[ck[i * k + j]] > rank_of_rho[node_getting_weight])
                node_getting_weight = ck[i * k + j];
        }
        ++rho_pushed_to_max_rank[rank_of_rho[node_getting_weight]];
    }
    /* Scan all rank prefixes and keep the one maximizing cliques-per-node. */
    info.density = -1;
    for (unsigned i = 0, cnt_clique_in_subgraph = 0; i < g->n; ++i) {
        cnt_clique_in_subgraph += rho_pushed_to_max_rank[i];
        if (info.density < (double)cnt_clique_in_subgraph / (i + 1)) {
            info.n = i + 1;
            info.m = cnt_clique_in_subgraph;
            info.density = (double)cnt_clique_in_subgraph / (i + 1);
        }
    }
    /* Mark membership of the chosen prefix. */
    for (unsigned i = 0; i < info.n; ++i)
        is_in_densest_subgraph[ordered_dec_rho[i]] = true;
    for (unsigned i = info.n; i < g->n; ++i)
        is_in_densest_subgraph[ordered_dec_rho[i]] = false;
    /* Compute an upper bound of maximum density.  ip1ck maintains C(i+1, k)
       via the recurrence C(n, k) = C(n-1, k) * n / (n - k). */
    unsigned sum = 0;
    info.ub = 0;
    double ip1ck = 0; // (i + 1) choose k
    for (unsigned i = 0; i < g->n; ++i) {
        sum += rho[ordered_dec_rho[i]];
        if (i + 1 == k)
            ip1ck = 1;
        else if (i + 1 > k)
            ip1ck = (ip1ck * (i + 1)) / (i + 1 - k);
        if (ip1ck < (double)sum / T)
            info.ub = ip1ck / (i + 1);
        else {
            if (info.ub < (double)sum / T / (i + 1))
                info.ub = (double)sum / T / (i + 1);
            break;
        }
    }
    return info;
}
/* Build an EdgeList containing only the edges whose two endpoints are flagged
   in is_in_densest_subgraph[].  Endpoints are relabeled by their rho rank so
   the ids are dense in [0, densest_subset_size).  Caller owns the result.
   k is unused; kept for signature symmetry. */
EdgeList *MakeDensestSubgraphEdgeList(Graph *g, const unsigned char k, const unsigned densest_subset_size) {
    EdgeList *list = (EdgeList *)malloc(sizeof(EdgeList));
    list->n = densest_subset_size;

    /* First pass: count the surviving edges. */
    unsigned kept = 0;
    for (unsigned i = 0; i < g->e; ++i) {
        if (is_in_densest_subgraph[g->edges[i].s] && is_in_densest_subgraph[g->edges[i].t])
            ++kept;
    }
    list->e = kept;
    list->edges = (Edge *)malloc(kept * sizeof(Edge));

    /* Second pass: copy and relabel. */
    unsigned out = 0;
    for (unsigned i = 0; i < g->e; ++i) {
        unsigned s = g->edges[i].s;
        unsigned t = g->edges[i].t;
        if (is_in_densest_subgraph[s] && is_in_densest_subgraph[t]) {
            list->edges[out].s = rank_of_rho[s];
            list->edges[out].t = rank_of_rho[t];
            ++out;
        }
    }
    return list;
}
/* Sample up to CLIQUES_TO_SAMPLE k-cliques of g into ck[].  The first pass
   counts the exact number of k-cliques, which fixes the sampling probability
   used by the second pass. */
void SampleCliques(Graph *g, const unsigned char k) {
    // Count the number of cliques
    KCLIST_CliqueEnum(g, k, COUNT);
    // Sampling
    KCLIST_CliqueEnum(g, k, SAMPLING);
}
/* Main optimization loop: run Frank-Wolfe rounds in doubling batches
   (1, 2, 4, ..., num_iter), and after each batch extract a tentative densest
   subset, count its k-cliques exactly and report one progress record.
   t0 is the process start time used for the elapsed-time column. */
void Solve(Graph *g, const unsigned char k, unsigned num_iter, clock_t t0) {
    perm = (unsigned *)malloc(cnt_sampled_clique * sizeof(unsigned));
    rho = (unsigned *)calloc(g->n, sizeof(unsigned)); // Initialized to 0 automatically
    is_in_densest_subgraph = (bool *)malloc(g->n * sizeof(bool));
    ordered_dec_rho = (unsigned *)malloc(g->n * sizeof(unsigned));
    rank_of_rho = (unsigned *)malloc(g->n * sizeof(unsigned));
    rho_pushed_to_max_rank = (unsigned *)calloc(g->n, sizeof(unsigned)); // Initialized to 0 automatically
    for (unsigned i = 0; i < cnt_sampled_clique; ++i)
        perm[i] = i;
    for (unsigned T = 1, t = 1; T <= num_iter; T <<= 1) {
        // Step 1: run the Frank-Wolfe based algorithm until T total rounds are done
        for (; t <= T; ++t) {
            if (t % 100 == 0)
                printf("Run round %u...\n", t);
            InMemoryFrankWolfe(g, k);
        }
        // Step 2: give a tentative decomposition
        DensestSubsetInfo info = ExtractDensest(g, k, T);
        // Step 3: count the number of cliques in the densest subset by constructing another Graph
        EdgeList *el = MakeDensestSubgraphEdgeList(g, k, info.n);
        SortByCore(el);
        Relabel(el);
        Graph *p_densest_subgraph = MakeGraph(el);
        /* NOTE(review): el is not freed here (the EdgeList in main is never
           freed either); MakeGraph may take ownership of el / el->edges --
           confirm before adding a free. */
        KCLIST_CliqueEnum(p_densest_subgraph, k, COUNT_IN_SUBGRAPH);
        clock_t t1 = clock();
        double edge_density = p_densest_subgraph->e * 2.0 / p_densest_subgraph->n / (p_densest_subgraph->n - 1);
        double density = (double)cnt_clique_in_densest_subgraph / p_densest_subgraph->n;
        double upper_bound = info.ub / sampling_prob / (1 - sqrt(6 * log(g->n) / info.ub));
        printf("Approximate densest subgraph: %u nodes, %u edges, edge density = %f, k-clique density = %f, upper bound = %f. %ld milliseconds.\n", p_densest_subgraph->n, p_densest_subgraph->e, edge_density, density, upper_bound, (t1 - t0) * 1000 / CLOCKS_PER_SEC);
        /* Release the whole subgraph, not just the top-level struct: the
           previous plain free() leaked the internal arrays MakeGraph
           allocated (main releases its graph with FreeGraph as well). */
        FreeGraph(p_densest_subgraph);
        fflush(stdout);
    }
    free(perm);
    free(rho);
    free(is_in_densest_subgraph);
    free(ordered_dec_rho);
    free(rank_of_rho);
    free(rho_pushed_to_max_rank);
}
/* Entry point.  Usage: ./kCDensestSamplingKclistpp p T k edgeListFileName
   where p = thread count, T = iteration budget, k = clique size (>= 3).
   See the file header for the output format. */
int main(int argc, char **argv) {
    /* Validate the command line before touching argv[1..4]. */
    if (argc < 5) {
        fprintf(stderr, "Usage: %s p T k edgeListFileName\n", argv[0]);
        return 1;
    }
    srand(time(NULL));
    EdgeList *el;
    Graph *g;
    unsigned num_threads = atoi(argv[1]);
    unsigned num_iter = atoi(argv[2]);
    unsigned char k = atoi(argv[3]);
    char *file_name = argv[4];
    /* The algorithm only handles k >= 3 (see file header). */
    if (k < 3) {
        fprintf(stderr, "k must be at least 3\n");
        return 1;
    }
    omp_set_num_threads(num_threads);
    /* NOTE: clock() measures CPU time, so with several OpenMP threads the
       reported durations can exceed wall-clock time. */
    clock_t t0, t1, t2;
    t0 = t1 = clock();
    printf("Reading edgelist from file %s\n", file_name);
    el = ReadEdgeList(file_name);
    printf("Number of nodes = %u\n", el->n);
    printf("Number of edges = %u\n", el->e);
    t2 = clock();
    printf("- Time = %ldh%ldm%lds%ldms\n",(t2 - t1) / CLOCKS_PER_SEC / 3600, ((t2 - t1) / CLOCKS_PER_SEC % 3600) / 60, ((t2 - t1) / CLOCKS_PER_SEC % 60), (t2 - t1) % CLOCKS_PER_SEC * 1000 / CLOCKS_PER_SEC);
    t1 = t2;
    printf("Building the graph structure\n");
    SortByCore(el); // Do core decomposition and render degeneracy ordering to the nodes
    Relabel(el);
    g = MakeGraph(el);
    printf("Number of nodes (degree > 0) = %u\n", g->n);
    t2 = clock();
    printf("- Time = %ldh%ldm%lds%ldms\n", (t2 - t1) / CLOCKS_PER_SEC / 3600, ((t2 - t1) / CLOCKS_PER_SEC % 3600) / 60, ((t2 - t1) / CLOCKS_PER_SEC % 60), (t2 - t1) % CLOCKS_PER_SEC * 1000 / CLOCKS_PER_SEC);
    t1 = t2;
    SampleCliques(g, k);
    t2 = clock();
    printf("- Time = %ldh%ldm%lds%ldms\n", (t2 - t1) / CLOCKS_PER_SEC / 3600, ((t2 - t1) / CLOCKS_PER_SEC % 3600) / 60, ((t2 - t1) / CLOCKS_PER_SEC % 60), (t2 - t1) % CLOCKS_PER_SEC * 1000 / CLOCKS_PER_SEC);
    t1 = t2;
    Solve(g, k, num_iter, t0);
    t2 = clock();
    printf("- Time = %ldh%ldm%lds%ldms\n", (t2 - t1) / CLOCKS_PER_SEC / 3600, ((t2 - t1) / CLOCKS_PER_SEC % 3600) / 60, ((t2 - t1) / CLOCKS_PER_SEC % 60), (t2 - t1) % CLOCKS_PER_SEC * 1000 / CLOCKS_PER_SEC);
    t1 = t2;
    FreeGraph(g);
    printf("- Overall time = %ldh%ldm%lds%ldms\n", (t2 - t0) / CLOCKS_PER_SEC / 3600, ((t2 - t0) / CLOCKS_PER_SEC % 3600) / 60, ((t2 - t0) / CLOCKS_PER_SEC % 60), (t2 - t0) % CLOCKS_PER_SEC * 1000 / CLOCKS_PER_SEC);
    return 0;
}
|
billownoise.h | #pragma once
#ifndef BILLOW_NOISE_H
#define BILLOW_NOISE_H
#include "noisecommon.h"
#define DEFAULT_BILLOW_FREQUENCY 1.0
#define DEFAULT_BILLOW_LACUNARITY 2.0
#define DEFAULT_BILLOW_PERSISTENCE 0.5
#define DEFAULT_BILLOW_OCTAVE_COUNT 6
#define DEFAULT_BILLOW_SEED 0
#define DEFAULT_BILLOW_POSITION_X 0.0
#define DEFAULT_BILLOW_POSITION_Y 0.0
#define DEFAULT_BILLOW_POSITION_Z 0.0
#define DEFAULT_BILLOW_STEP 0.01
#define DEFAULT_BILLOW_PARALLEL false
#define DEFAULT_BILLOW_QUALITY QUALITY_STANDARD
/* Parameters and dispatch state for billow fractal-noise evaluation. */
struct BillowNoise {
    float frequency;            /* Frequency of the first octave. */
    float lacunarity;           /* Per-octave frequency multiplier. */
    float persistence;          /* Per-octave amplitude multiplier. */
    unsigned char octave_count; /* Number of octaves summed. */
    int seed;                   /* Base seed; each octave adds its index. */
    float position[3];          /* Origin of the sampled region (x, y, z). */
    float step;                 /* Grid spacing between adjacent samples. */
    bool parallel;              /* Enables the OpenMP parallel-for in the kernels. */
    /* Evaluation kernel selected by billow_noise_init() based on SIMD support. */
    float *(*billow_func)(struct BillowNoise *, size_t, size_t, size_t);
    enum NoiseQuality noise_quality; /* Interpolation quality for the gradient noise. */
};
static inline float *billow_noise_eval_1d(struct BillowNoise *billow_noise, size_t x_size);
static inline float *billow_noise_eval_2d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size);
static inline float *billow_noise_eval_3d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float billow_noise_eval_3d_single(struct BillowNoise *billow_noise, float x_pos, float y_pos, float z_pos);
static inline float *billow_noise_eval_3d_fallback(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_sse2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_sse4_1(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline float *billow_noise_eval_3d_avx512(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size);
static inline void billow_noise_init(struct BillowNoise *billow_noise) {
billow_noise->frequency = DEFAULT_BILLOW_FREQUENCY;
billow_noise->lacunarity = DEFAULT_BILLOW_LACUNARITY;
billow_noise->persistence = DEFAULT_BILLOW_PERSISTENCE;
billow_noise->octave_count = DEFAULT_BILLOW_OCTAVE_COUNT;
billow_noise->seed = DEFAULT_BILLOW_SEED;
billow_noise->noise_quality = DEFAULT_BILLOW_QUALITY;
billow_noise->position[0] = DEFAULT_BILLOW_POSITION_X;
billow_noise->position[1] = DEFAULT_BILLOW_POSITION_Y;
billow_noise->position[2] = DEFAULT_BILLOW_POSITION_X;
billow_noise->step = DEFAULT_BILLOW_STEP;
billow_noise->parallel = DEFAULT_BILLOW_PARALLEL;
switch (detect_simd_support()) {
#ifdef ARCH_32_64
case NOISE_SIMD_AVX512F:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
case NOISE_SIMD_AVX2:
billow_noise->billow_func = &billow_noise_eval_3d_avx2;
break;
case NOISE_SIMD_AVX:
billow_noise->billow_func = &billow_noise_eval_3d_avx;
break;
case NOISE_SIMD_SSE4_1:
billow_noise->billow_func = &billow_noise_eval_3d_sse4_1;
break;
case NOISE_SIMD_SSE2:
billow_noise->billow_func = &billow_noise_eval_3d_sse2;
break;
#else
case SIMD_NEON:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
#endif
default:
billow_noise->billow_func = &billow_noise_eval_3d_fallback;
break;
}
}
/* Evaluate a 1-D strip of billow noise (y and z extents fixed to 1). */
static inline float *billow_noise_eval_1d(struct BillowNoise *billow_noise, size_t x_size) {
    return billow_noise->billow_func(billow_noise, x_size, 1, 1);
}
/* Evaluate a 2-D slice of billow noise (z extent fixed to 1). */
static inline float *billow_noise_eval_2d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size) {
    return billow_noise->billow_func(billow_noise, x_size, y_size, 1);
}
/* Evaluate a full 3-D volume of billow noise via the dispatched kernel. */
static inline float *billow_noise_eval_3d(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
    return billow_noise->billow_func(billow_noise, x_size, y_size, z_size);
}
/* Evaluate billow noise at a single grid point (x_pos, y_pos, z_pos):
   sum octave_count octaves of folded gradient noise, then shift by +0.5. */
static inline float billow_noise_eval_3d_single(struct BillowNoise *billow_noise, float x_pos, float y_pos, float z_pos) {
    /* Map the grid index into noise space: (origin + index * step) * frequency. */
    float sx = (billow_noise->position[0] + (x_pos * billow_noise->step)) * billow_noise->frequency;
    float sy = (billow_noise->position[1] + (y_pos * billow_noise->step)) * billow_noise->frequency;
    float sz = (billow_noise->position[2] + (z_pos * billow_noise->step)) * billow_noise->frequency;
    float result = 0.0;
    float amplitude = 1.0;
    for (int octave = 0; octave < billow_noise->octave_count; octave++) {
        float rx = make_int_32_range(sx);
        float ry = make_int_32_range(sy);
        float rz = make_int_32_range(sz);
        int octave_seed = (billow_noise->seed + octave) & 0xffffffff;
        float contrib = gradient_coherent_noise_3d(rx, ry, rz, octave_seed, billow_noise->noise_quality);
        /* Billow fold: reflect the signal around zero. */
        contrib = 2.0 * fabs(contrib) - 1.0;
        result += contrib * amplitude;
        sx *= billow_noise->lacunarity;
        sy *= billow_noise->lacunarity;
        sz *= billow_noise->lacunarity;
        amplitude *= billow_noise->persistence;
    }
    result += 0.5;
    return result;
}
/* Scalar evaluation of billow noise over an x_size * y_size * z_size grid.
   Returns a newly allocated array indexed [z][y][x] (x fastest); the caller
   owns the buffer.
   Fix: the coordinate mapping now matches billow_noise_eval_3d_single and
   every SIMD kernel, i.e. (position + index * step) * frequency.  The old
   form (position * frequency + index * step) disagreed with all of them
   whenever frequency != 1. */
static inline float *billow_noise_eval_3d_fallback(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
#ifdef CUSTOM_ALLOCATOR
    float *noise_set = malloc(sizeof(float) * x_size * y_size * z_size);
#else
    float *noise_set = noise_allocate(sizeof(float), sizeof(float) * x_size * y_size * z_size);
#endif
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
    for (int z_dim = 0; z_dim < z_size; z_dim++) {
        for (int y_dim = 0; y_dim < y_size; y_dim++) {
            for (int x_dim = 0; x_dim < x_size; x_dim++) {
                float x = (billow_noise->position[0] + (x_dim * billow_noise->step)) * billow_noise->frequency;
                float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
                float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
                float value = 0.0;
                float cur_persistence = 1.0;
                for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
                    float nx = make_int_32_range(x);
                    float ny = make_int_32_range(y);
                    float nz = make_int_32_range(z);
                    int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
                    float signal = gradient_coherent_noise_3d(nx, ny, nz, cur_seed, billow_noise->noise_quality);
                    /* Billow fold: 2 * |signal| - 1. */
                    signal = 2.0 * fabs(signal) - 1.0;
                    value += signal * cur_persistence;
                    x *= billow_noise->lacunarity;
                    y *= billow_noise->lacunarity;
                    z *= billow_noise->lacunarity;
                    cur_persistence *= billow_noise->persistence;
                }
                value += 0.5;
                *(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size)))) = value;
            }
        }
    }
    return noise_set;
}
#ifdef ARCH_32_64
#ifdef SIMD_SSE2
/* SSE2 kernel: evaluates 4 x-adjacent samples per inner iteration.
   NOTE(review): assumes x_size is a multiple of 4 and that noise_set is
   16-byte aligned (noise_allocate is passed sizeof(__m128), presumably the
   alignment) -- confirm against noise_allocate's contract. */
static inline float *billow_noise_eval_3d_sse2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
    float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
    for (int z_dim = 0; z_dim < z_size; z_dim++) {
        for (int y_dim = 0; y_dim < y_size; y_dim++) {
            for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
                /* x coordinates for lanes x_dim..x_dim+3: (origin + i*step) * frequency. */
                __m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(billow_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(billow_noise->step))), _mm_set1_ps(billow_noise->frequency));
                float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
                float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
                __m128 value = _mm_set1_ps(0.0);
                float cur_persistence = 1.0;
                for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
                    __m128 nx = make_int_32_range_sse2(x_vec);
                    float ny = make_int_32_range(y);
                    float nz = make_int_32_range(z);
                    int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
                    __m128 signal = gradient_coherent_noise_3d_sse2(nx, ny, nz, cur_seed, billow_noise->noise_quality);
                    /* Billow fold: 2*|signal| - 1; andnot with -0.0 clears the sign bit. */
                    signal = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(2.0), _mm_andnot_ps(_mm_set1_ps(-0.0), signal)), _mm_set1_ps(1.0));
                    value = _mm_add_ps(value, _mm_mul_ps(signal, _mm_set1_ps(cur_persistence)));
                    x_vec = _mm_mul_ps(x_vec, _mm_set1_ps(billow_noise->lacunarity));
                    y *= billow_noise->lacunarity;
                    z *= billow_noise->lacunarity;
                    cur_persistence *= billow_noise->persistence;
                }
                value = _mm_add_ps(value, _mm_set1_ps(0.5));
                _mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
            }
        }
    }
    return noise_set;
}
#endif
#ifdef SIMD_SSE41
/* SSE4.1 kernel: identical structure to the SSE2 kernel (4 x-lanes per
   iteration) but uses the SSE4.1 gradient-noise routine.
   NOTE(review): assumes x_size is a multiple of 4 and 16-byte output
   alignment -- confirm. */
static inline float *billow_noise_eval_3d_sse4_1(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
    float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
    for (int z_dim = 0; z_dim < z_size; z_dim++) {
        for (int y_dim = 0; y_dim < y_size; y_dim++) {
            for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
                /* x coordinates for lanes x_dim..x_dim+3. */
                __m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(billow_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(billow_noise->step))), _mm_set1_ps(billow_noise->frequency));
                float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
                float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
                __m128 value = _mm_set1_ps(0.0);
                float cur_persistence = 1.0;
                for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
                    __m128 nx = make_int_32_range_sse2(x_vec);
                    float ny = make_int_32_range(y);
                    float nz = make_int_32_range(z);
                    int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
                    __m128 signal = gradient_coherent_noise_3d_sse4_1(nx, ny, nz, cur_seed, billow_noise->noise_quality);
                    /* Billow fold: 2*|signal| - 1. */
                    signal = _mm_sub_ps(_mm_mul_ps(_mm_set1_ps(2.0), _mm_andnot_ps(_mm_set1_ps(-0.0), signal)), _mm_set1_ps(1.0));
                    value = _mm_add_ps(value, _mm_mul_ps(signal, _mm_set1_ps(cur_persistence)));
                    x_vec = _mm_mul_ps(x_vec, _mm_set1_ps(billow_noise->lacunarity));
                    y *= billow_noise->lacunarity;
                    z *= billow_noise->lacunarity;
                    cur_persistence *= billow_noise->persistence;
                }
                value = _mm_add_ps(value, _mm_set1_ps(0.5));
                _mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
            }
        }
    }
    return noise_set;
}
#endif
#ifdef SIMD_AVX
/* AVX kernel: evaluates 8 x-adjacent samples per inner iteration.
   NOTE(review): assumes x_size is a multiple of 8 and that noise_set is
   32-byte aligned (noise_allocate is passed sizeof(__m256)) -- confirm. */
static inline float *billow_noise_eval_3d_avx(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
    float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
    for (int z_dim = 0; z_dim < z_size; z_dim++) {
        for (int y_dim = 0; y_dim < y_size; y_dim++) {
            for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
                /* x coordinates for lanes x_dim..x_dim+7. */
                __m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(billow_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(billow_noise->step))), _mm256_set1_ps(billow_noise->frequency));
                float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
                float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
                __m256 value = _mm256_set1_ps(0.0);
                float cur_persistence = 1.0;
                for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
                    __m256 nx = make_int_32_range_avx(x_vec);
                    float ny = make_int_32_range(y);
                    float nz = make_int_32_range(z);
                    int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
                    __m256 signal = gradient_coherent_noise_3d_avx(nx, ny, nz, cur_seed, billow_noise->noise_quality);
                    /* Billow fold: 2*|signal| - 1. */
                    signal = _mm256_sub_ps(_mm256_mul_ps(_mm256_set1_ps(2.0), _mm256_andnot_ps(_mm256_set1_ps(-0.0), signal)), _mm256_set1_ps(1.0));
                    value = _mm256_add_ps(value, _mm256_mul_ps(signal, _mm256_set1_ps(cur_persistence)));
                    x_vec = _mm256_mul_ps(x_vec, _mm256_set1_ps(billow_noise->lacunarity));
                    y *= billow_noise->lacunarity;
                    z *= billow_noise->lacunarity;
                    cur_persistence *= billow_noise->persistence;
                }
                value = _mm256_add_ps(value, _mm256_set1_ps(0.5));
                _mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
            }
        }
    }
    return noise_set;
}
#endif
#ifdef SIMD_AVX2
/* AVX2 kernel: identical structure to the AVX kernel (8 x-lanes per
   iteration) but uses the AVX2 gradient-noise routine.
   NOTE(review): assumes x_size is a multiple of 8 and 32-byte output
   alignment -- confirm. */
static inline float *billow_noise_eval_3d_avx2(struct BillowNoise *billow_noise, size_t x_size, size_t y_size, size_t z_size) {
    float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (billow_noise->parallel)
    for (int z_dim = 0; z_dim < z_size; z_dim++) {
        for (int y_dim = 0; y_dim < y_size; y_dim++) {
            for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
                /* x coordinates for lanes x_dim..x_dim+7. */
                __m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(billow_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(billow_noise->step))), _mm256_set1_ps(billow_noise->frequency));
                float y = (billow_noise->position[1] + (y_dim * billow_noise->step)) * billow_noise->frequency;
                float z = (billow_noise->position[2] + (z_dim * billow_noise->step)) * billow_noise->frequency;
                __m256 value = _mm256_set1_ps(0.0);
                float cur_persistence = 1.0;
                for (int cur_octave = 0; cur_octave < billow_noise->octave_count; cur_octave++) {
                    __m256 nx = make_int_32_range_avx(x_vec);
                    float ny = make_int_32_range(y);
                    float nz = make_int_32_range(z);
                    int cur_seed = (billow_noise->seed + cur_octave) & 0xffffffff;
                    __m256 signal = gradient_coherent_noise_3d_avx2(nx, ny, nz, cur_seed, billow_noise->noise_quality);
                    /* Billow fold: 2*|signal| - 1. */
                    signal = _mm256_sub_ps(_mm256_mul_ps(_mm256_set1_ps(2.0), _mm256_andnot_ps(_mm256_set1_ps(-0.0), signal)), _mm256_set1_ps(1.0));
                    value = _mm256_add_ps(value, _mm256_mul_ps(signal, _mm256_set1_ps(cur_persistence)));
                    x_vec = _mm256_mul_ps(x_vec, _mm256_set1_ps(billow_noise->lacunarity));
                    y *= billow_noise->lacunarity;
                    z *= billow_noise->lacunarity;
                    cur_persistence *= billow_noise->persistence;
                }
                value = _mm256_add_ps(value, _mm256_set1_ps(0.5));
                _mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), value);
            }
        }
    }
    return noise_set;
}
#endif
#endif
#endif // BILLOW_NOISE_H
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#include "libperf_int.h"
#include <ucs/debug/log.h>
#include <string.h>
#include <malloc.h>
#include <unistd.h>
/* Endpoint bootstrap information exchanged between peers before a test run.
   The union reflects the two test APIs: UCT carries three address lengths,
   UCP a single worker-address length. */
typedef struct {
    union {
        struct {
            size_t dev_addr_len;   /* Length of the UCT device address blob. */
            size_t iface_addr_len; /* Length of the UCT interface address blob. */
            size_t ep_addr_len;    /* Length of the UCT endpoint address blob. */
        } uct;
        struct {
            size_t addr_len;       /* Length of the UCP worker address blob. */
        } ucp;
    };
    size_t rkey_size;              /* Size of the packed remote key. */
    unsigned long recv_buffer;     /* Peer's receive-buffer address, transported as an integer
                                      -- presumably cast back to a pointer on the remote side; confirm. */
} ucx_perf_ep_info_t;
/*
* This Quickselect routine is based on the algorithm described in
* "Numerical recipes in C", Second Edition,
* Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
* This code by Nicolas Devillard - 1998. Public domain.
*/
/* Return the ((n-1)/2)-th smallest element of arr[0..n-1] (the lower median
   for even n) using Hoare's Quickselect.
   NOTE: partially reorders arr[] in place.
   NOTE(review): ELEM_SWAP stays defined after this function (no #undef), and
   the double-underscore name is formally reserved to the implementation. */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int low, high ;
    int median;
    int middle, ll, hh;

#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }

    low = 0 ; high = n-1 ; median = (low + high) / 2;
    for (;;) {
        if (high <= low) /* One element only */
            return arr[median] ;

        if (high == low + 1) { /* Two elements only */
            if (arr[low] > arr[high])
                ELEM_SWAP(arr[low], arr[high]) ;
            return arr[median] ;
        }

        /* Find median of low, middle and high items; swap into position low */
        middle = (low + high) / 2;
        if (arr[middle] > arr[high]) ELEM_SWAP(arr[middle], arr[high]) ;
        if (arr[low] > arr[high]) ELEM_SWAP(arr[low], arr[high]) ;
        if (arr[middle] > arr[low]) ELEM_SWAP(arr[middle], arr[low]) ;

        /* Swap low item (now in position middle) into position (low+1) */
        ELEM_SWAP(arr[middle], arr[low+1]) ;

        /* Nibble from each end towards middle, swapping items when stuck */
        ll = low + 1;
        hh = high;
        for (;;) {
            do ll++; while (arr[low] > arr[ll]) ;
            do hh--; while (arr[hh] > arr[low]) ;

            if (hh < ll)
                break;

            ELEM_SWAP(arr[ll], arr[hh]) ;
        }

        /* Swap middle item (in position low) back into correct position */
        ELEM_SWAP(arr[low], arr[hh]) ;

        /* Re-set active partition */
        if (hh <= median)
            low = ll;
        if (hh >= median)
            high = hh - 1;
    }
}
/* Allocate the send buffer, receive buffer and IOV array for a UCT perf test.
   On success fills perf->send_buffer, perf->recv_buffer and perf->uct.iov;
   on failure every partially-acquired resource is released before return.
   Fix: an IOV allocation failure used to jump to err_free_send, leaking the
   already-allocated receive buffer -- it now unwinds through err_free_recv. */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf,
                                            ucx_perf_params_t *params)
{
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */
    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.send_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate send buffer: %s", ucs_status_string(status));
        goto err;
    }
    ucs_assert(perf->uct.send_mem.md == perf->uct.md);
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.recv_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status));
        goto err_free_send;
    }
    ucs_assert(perf->uct.recv_mem.md == perf->uct.md);
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        goto err_free_recv;
    }

    perf->offset = 0;

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    uct_iface_mem_free(&perf->uct.recv_mem);
err_free_send:
    uct_iface_mem_free(&perf->uct.send_mem);
err:
    return status;
}
/* Release everything uct_perf_test_alloc_mem() created: the IOV descriptor
 * array and both registered buffers. */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->uct.iov);
    uct_iface_mem_free(&perf->uct.recv_mem);
    uct_iface_mem_free(&perf->uct.send_mem);
}
/* Restart the measurement clock: the start time and both "previous report"
 * timestamps are set to the current time. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time = now;
    perf->prev_time  = now;
    perf->prev.time  = now;
}
/* Reset all measurement state before a run (warmup or measured), taking a
 * fresh snapshot of the parameters and zeroing current/previous counters. */
static void ucx_perf_test_reset(ucx_perf_context_t *perf,
                                ucx_perf_params_t *params)
{
    unsigned idx;

    perf->params          = *params;
    perf->start_time      = ucs_get_time();
    perf->prev_time       = perf->start_time;
    /* max_time == 0 and max_iter == 0 both mean "unlimited" */
    perf->end_time        = (perf->params.max_time == 0.0) ? UINT64_MAX :
                            ucs_time_from_sec(perf->params.max_time) + perf->start_time;
    perf->max_iter        = (perf->params.max_iter == 0) ? UINT64_MAX :
                            perf->params.max_iter;
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;

    perf->prev.time  = perf->start_time;
    perf->prev.msgs  = 0;
    perf->prev.bytes = 0;
    perf->prev.iters = 0;

    perf->timing_queue_head = 0;
    perf->offset            = 0;
    for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
        perf->timing_queue[idx] = 0;
    }
}
/* Compute the final report values (latency, bandwidth, message rate) from
 * the accumulated counters in PERF into RESULT.
 * "moment" values cover the interval since the previous report; "total"
 * values cover the whole run.  Typical latency is the median of the timing
 * queue (which is permuted in place by the selection). */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    double one_sec;
    double factor;
    double moment_time, moment_iters, moment_bytes, moment_msgs;
    double total_time, total_iters, total_bytes, total_msgs;

    one_sec = ucs_time_from_sec(1.0);
    /* In ping-pong mode each iteration is a round-trip, so halve per-op metrics */
    factor  = (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ? 2.0 : 1.0;

    moment_time  = (double)(perf->current.time  - perf->prev.time);
    moment_iters = (double)(perf->current.iters - perf->prev.iters);
    moment_bytes = (double)(perf->current.bytes - perf->prev.bytes);
    moment_msgs  = (double)(perf->current.msgs  - perf->prev.msgs);
    total_time   = (double)(perf->current.time  - perf->start_time);
    total_iters  = (double)perf->current.iters;
    total_bytes  = (double)perf->current.bytes;
    total_msgs   = (double)perf->current.msgs;

    result->iters        = perf->current.iters;
    result->bytes        = perf->current.bytes;
    result->elapsed_time = perf->current.time - perf->start_time;

    /* Latency */
    result->latency.typical =
        __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE)
        / one_sec / factor;
    result->latency.moment_average = moment_time / moment_iters / one_sec / factor;
    result->latency.total_average  = total_time / total_iters / one_sec / factor;

    /* Bandwidth */
    result->bandwidth.typical        = 0.0; /* undefined */
    result->bandwidth.moment_average = moment_bytes * one_sec / moment_time * factor;
    result->bandwidth.total_average  = total_bytes * one_sec / total_time * factor;

    /* Packet rate */
    result->msgrate.typical        = 0.0; /* undefined */
    result->msgrate.moment_average = moment_msgs * one_sec / moment_time * factor;
    result->msgrate.total_average  = total_msgs * one_sec / total_time * factor;
}
/* Validate the generic (API-independent) test parameters: non-empty message,
 * positive outstanding window, and message sizes that fit the IOV stride.
 * Diagnostics are printed only in verbose mode.
 * Returns UCS_OK or UCS_ERR_INVALID_PARAM. */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    if (ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            /* Fix: grammar of the diagnostic message */
            ucs_error("Message size too small, must be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            /* Fix: previously garbled "max_outstanding, need to be at least 1" */
            ucs_error("max_outstanding must be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if (params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Blocking flush: repeatedly issue a non-blocking iface flush and progress
 * the worker until the flush no longer returns UCS_INPROGRESS.  The worker
 * is progressed at least once, exactly as the original do/while did. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    for (;;) {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (status != UCS_INPROGRESS) {
            break;
        }
    }
}
/* Select the capability flag matching the requested data layout;
 * returns 0 for an unknown layout (meaning "unsupported"). */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Select the 32- or 64-bit atomic capability flag for the given operand
 * size; returns 0 for any other size (meaning "unsupported"). */
static inline uint64_t __get_atomic_flag(size_t size, uint64_t flag32, uint64_t flag64)
{
    switch (size) {
    case 4:
        return flag32;
    case 8:
        return flag64;
    default:
        return 0;
    }
}
/* Select the size limit matching the requested data layout; returns 0 for
 * an unknown layout.
 * Fix: zcopy_m was declared uint64_t while its siblings are size_t - make
 * the parameter types consistent (callers pass size_t capability fields). */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/* Verify that the selected UCT interface can run the requested test:
 * capability flags for the command/layout, message size bounds, IOV count,
 * and AM-specific header/window constraints.
 * Returns UCS_OK, UCS_ERR_UNSUPPORTED, or UCS_ERR_INVALID_PARAM; verbose
 * diagnostics are printed when UCX_PERF_TEST_FLAG_VERBOSE is set. */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface)
{
    uct_iface_attr_t attr;
    ucs_status_t status;
    uint64_t required_flags;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        required_flags = __get_flag(params->uct.data_layout, 0,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, 0,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_ADD32,
                                           UCT_IFACE_FLAG_ATOMIC_ADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_FADD32,
                                           UCT_IFACE_FLAG_ATOMIC_FADD64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_SWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_SWAP64);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        required_flags = __get_atomic_flag(message_size, UCT_IFACE_FLAG_ATOMIC_CSWAP32,
                                           UCT_IFACE_FLAG_ATOMIC_CSWAP64);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* required_flags == 0 means the layout/size combination itself has no
     * matching operation (e.g. GET with SHORT layout, non-4/8-byte atomic) */
    if (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Device does not support required operation");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too big");
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size too big");
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size larger than message size");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window too large (should be <= %d)",
                          UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            /* Fix: typo "on-sided" -> "one-sided" */
            ucs_warn("Running active-message test with one-sided progress");
        }
    }

    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Exchange addresses and rkeys with all peers through the RTE and connect a
 * UCT endpoint to each remote group member.
 *
 * The local info (address lengths, rkey size, recv buffer pointer) is packed
 * into a scratch buffer laid out as [rkey | dev_addr | iface_addr | ep_addr]
 * and posted to the RTE; the same layout is parsed from each peer's reply.
 *
 * Returns UCS_OK; on error all endpoints/rkeys created so far are released.
 */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[5];
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    /* An rkey is exchanged only when the MD can produce one */
    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Carve the scratch buffer: [rkey | dev_addr | iface_addr | ep_addr] */
    rkey_buffer = buffer;
    dev_addr    = (void*)rkey_buffer + info.rkey_size;
    iface_addr  = (void*)dev_addr    + info.uct.dev_addr_len;
    ep_addr     = (void*)iface_addr  + info.uct.iface_addr_len;
    ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size);

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* Fix: report the failure - previously this path fell through with
         * the stale UCS_OK left by the last successful call. */
        ucs_error("Failed to allocate peers array");
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }

            status = uct_ep_create(perf->uct.iface, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    }

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Parse the peer's reply using the same layout we posted */
        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = (void*)rkey_buffer + remote_info->rkey_size;
        iface_addr  = (void*)dev_addr    + remote_info->uct.dev_addr_len;
        ep_addr     = (void*)iface_addr  + remote_info->uct.iface_addr_len;

        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.type   = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            status = uct_ep_create_connected(perf->uct.iface, dev_addr, iface_addr,
                                             &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }

    uct_perf_iface_flush_b(perf);
    free(buffer);
    rte_call(perf, barrier);
    return UCS_OK;

err_destroy_eps:
    /* peers was calloc'ed, so untouched slots have NULL ep / NULL rkey.type */
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.type != NULL) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/* Tear down all UCT endpoints and remote keys created during setup, after
 * removing the AM handler, then free the peers array. */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size, group_index, peer;

    rte_call(perf, barrier);

    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL,
                             UCT_CB_FLAG_SYNC);

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    for (peer = 0; peer < group_size; ++peer) {
        if (peer == group_index) {
            continue;
        }
        if (perf->uct.peers[peer].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(&perf->uct.peers[peer].rkey);
        }
        if (perf->uct.peers[peer].ep) {
            uct_ep_destroy(perf->uct.peers[peer].ep);
        }
    }
    free(perf->uct.peers);
}
/* Translate test parameters into UCP context requirements: sets the feature
 * bits (and request_size for TAG/STREAM tests) in UCP_PARAMS based on the
 * test command, then validates the generic parameters.
 * Returns UCS_ERR_INVALID_PARAM for an unknown command or bad atomic size. */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                              ucp_params_t *ucp_params)
{
    ucs_status_t status;
    size_t message_size; /* Fix: was mistyped as ucs_status_t */

    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        ucp_params->features |= UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        if (message_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
        ucp_params->features    |= UCP_FEATURE_TAG;
        ucp_params->field_mask  |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size = sizeof(ucp_perf_request_t);
        break;
    case UCX_PERF_CMD_STREAM:
        ucp_params->features    |= UCP_FEATURE_STREAM;
        ucp_params->field_mask  |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size = sizeof(ucp_perf_request_t);
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }
    return UCS_OK;
}
/* Allocate the IOV descriptor array when the datatype is IOV; a no-op for
 * any other datatype.  On success *iov_p owns the allocation. */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    if (datatype != UCP_PERF_DATATYPE_IOV) {
        return UCS_OK;
    }

    iov = malloc(sizeof(*iov) * iovcnt * thread_count);
    if (iov == NULL) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }
    *iov_p = iov;
    return UCS_OK;
}
/* Map send/receive buffers through UCP and allocate IOV descriptor arrays.
 * On success perf->send_buffer / perf->recv_buffer point at the mapped
 * regions.  On error all partially-created resources are released and the
 * real failure status is returned (previously the status was discarded and
 * UCS_ERR_NO_MEMORY returned unconditionally). */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucs_status_t status;
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    size_t buffer_size;

    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = perf->send_buffer;
    mem_map_params.length     = buffer_size * params->thread_count;
    mem_map_params.flags      = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
                                UCP_MEM_MAP_NONBLOCK : 0;
    mem_map_params.flags     |= UCP_MEM_MAP_ALLOCATE;

    status = ucp_mem_map(perf->ucp.context, &mem_map_params,
                         &perf->ucp.send_memh);
    if (status != UCS_OK) {
        goto err;
    }

    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(perf->ucp.send_memh, &mem_attr);
    if (status != UCS_OK) {
        goto err_free_send_buffer; /* Fix: was "goto err", leaking send_memh */
    }
    perf->send_buffer = mem_attr.address;

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = perf->recv_buffer;
    mem_map_params.length     = buffer_size * params->thread_count;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, &perf->ucp.recv_memh);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(perf->ucp.recv_memh, &mem_attr);
    if (status != UCS_OK) {
        goto err_free_buffers; /* Fix: also unmap recv_memh (was leaked) */
    }
    perf->recv_buffer = mem_attr.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;

    perf->ucp.send_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype,
                                         perf->params.msg_size_cnt,
                                         params->thread_count,
                                         &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    ucp_mem_unmap(perf->ucp.context, perf->ucp.recv_memh);
err_free_send_buffer:
    ucp_mem_unmap(perf->ucp.context, perf->ucp.send_memh);
err:
    /* Fix: propagate the actual failure status */
    return status;
}
/* Release everything ucp_perf_test_alloc_mem() created: the IOV descriptor
 * arrays (free(NULL) is a no-op) and both mapped buffers. */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.send_iov);
    free(perf->ucp.recv_iov);
    ucp_mem_unmap(perf->ucp.context, perf->ucp.recv_memh);
    ucp_mem_unmap(perf->ucp.context, perf->ucp.send_memh);
}
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
unsigned group_size)
{
ucs_status_ptr_t *reqs;
ucp_tag_recv_info_t info;
ucs_status_t status;
unsigned i;
reqs = calloc(sizeof(*reqs), group_size);
for (i = 0; i < group_size; ++i) {
if (perf->ucp.peers[i].rkey != NULL) {
ucp_rkey_destroy(perf->ucp.peers[i].rkey);
}
if (perf->ucp.peers[i].ep != NULL) {
reqs[i] = ucp_disconnect_nb(perf->ucp.peers[i].ep);
}
}
for (i = 0; i < group_size; ++i) {
if (!UCS_PTR_IS_PTR(reqs[i])) {
continue;
}
do {
ucp_worker_progress(perf->ucp.worker);
status = ucp_request_test(reqs[i], &info);
} while (status == UCS_INPROGRESS);
ucp_request_release(reqs[i]);
}
free(reqs);
free(perf->ucp.peers);
}
/* Collective status exchange over the RTE group: every rank posts its own
 * STATUS and receives everyone else's; any non-OK status seen (including
 * the local one, which is received back as well) becomes the result. */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    ucs_status_t collective_status;
    unsigned group_size, peer;
    struct iovec vec;
    void *req;

    group_size        = rte_call(perf, group_size);
    collective_status = UCS_OK;
    req               = NULL;

    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);

    for (peer = 0; peer < group_size; ++peer) {
        /* The receive overwrites 'status' with the peer's value */
        rte_call(perf, recv, peer, &status, sizeof(status), req);
        if (status != UCS_OK) {
            collective_status = status;
        }
    }
    return collective_status;
}
/* Establish UCP endpoints to all peers: exchange worker addresses, receive
 * buffer pointers and (for RMA/AMO features) packed rkeys through the RTE,
 * then create an endpoint and unpack the rkey for every remote group member.
 * A collective status exchange at the end makes all ranks fail together.
 * Returns UCS_OK, or an error status with all endpoints destroyed. */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    ucp_address_t *address;
    size_t address_length = 0;
    ucp_ep_params_t ep_params;
    ucs_status_t status;
    struct iovec vec[3];
    void *rkey_buffer;
    void *req = NULL;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
        }
        goto err;
    }

    info.ucp.addr_len = address_length;
    info.recv_buffer  = (uintptr_t)perf->recv_buffer;

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = address;
    vec[1].iov_len  = address_length;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &info.rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            ucp_worker_release_address(perf->ucp.worker, address);
            goto err;
        }

        vec[2].iov_base = rkey_buffer;
        vec[2].iov_len  = info.rkey_size;
        rte_call(perf, post_vec, vec, 3, &req);
        ucp_rkey_buffer_release(rkey_buffer);
    } else {
        info.rkey_size = 0;
        rte_call(perf, post_vec, vec, 2, &req);
    }

    ucp_worker_release_address(perf->ucp.worker, address);
    rte_call(perf, exchange_vec, req);

    /* Fix: size by the UCP peer element (was sizeof(*perf->uct.peers)), and
     * set a real error status on failure - the old code reached "err" with
     * the stale UCS_OK from the last successful call. */
    perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
    if (perf->ucp.peers == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err_destroy_eps;
    }

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Reply layout: [info | worker address | packed rkey] */
        remote_info = buffer;
        address     = (void*)(remote_info + 1);
        rkey_buffer = (void*)address + remote_info->ucp.addr_len;
        perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
                                        &perf->ucp.peers[i].rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_buffer;
            }
        } else {
            perf->ucp.peers[i].rkey = NULL;
        }
    }

    free(buffer);

    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        ucp_perf_test_destroy_eps(perf, group_size);
    }
    return status;

err_free_buffer:
    free(buffer);
err_destroy_eps:
    ucp_perf_test_destroy_eps(perf, group_size);
err:
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Synchronize with all ranks, then tear down every UCP endpoint and rkey. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned peer_count;

    rte_call(perf, barrier);
    peer_count = rte_call(perf, group_size);
    ucp_perf_test_destroy_eps(perf, peer_count);
}
/* Configure the context for a warmup run: disable periodic reporting and
 * cap the iteration count at min(warmup_iter, 10% of the full run). */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
    perf->report_interval = -1;
    perf->max_iter        = ucs_min(params->warmup_iter, params->max_iter / 10);
}
/* Find and open the memory domain that exposes the transport/device pair
 * requested in perf->params.uct (tl_name/dev_name).  Iterates over all MD
 * resources, opens each in turn and scans its transport resources; the
 * first matching MD is kept open in perf->uct.md (ownership passes to the
 * caller), all others are closed again.
 * Returns UCS_ERR_NO_DEVICE when no MD exposes the requested pair. */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned i, num_md_resources;
    unsigned j, num_tl_resources;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;

    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        goto out;
    }

    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL, &md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_open(md_resources[i].md_name, md_config, &md);
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }

        status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            goto out_release_md_resources;
        }

        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(perf->params.uct.tl_name, tl_resources[j].tl_name) &&
                !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name))
            {
                /* Match found - keep this MD open and return it */
                uct_release_tl_resource_list(tl_resources);
                perf->uct.md = md;
                status = UCS_OK;
                goto out_release_md_resources;
            }
        }

        /* No match on this MD - close it and keep scanning */
        uct_md_close(md);
        uct_release_tl_resource_list(tl_resources);
    }

    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;

out_release_md_resources:
    uct_release_md_resource_list(md_resources);
out:
    return status;
}
/* Bring up the full UCT stack for one test run: async context, worker,
 * memory domain, interface, test buffers, and connections to all peers.
 * On failure, everything created so far is torn down in reverse order via
 * the goto ladder below. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    /* The config is consumed by uct_iface_open regardless of the outcome */
    uct_config_release(iface_config);
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* Validate the test against the interface capabilities before
     * allocating buffers or connecting */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
    return UCS_OK;

out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Tear down everything uct_perf_setup() created, in reverse order of
 * creation: endpoints, buffers, interface, MD, worker, async context. */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/* Bring up the UCP stack for one test run: context (with features derived
 * from the test command), worker, mapped buffers, and endpoints to all
 * peers.  On failure everything created so far is torn down in reverse
 * order via the goto ladder below. */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf,
                                   ucx_perf_params_t *params)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;

    ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ucp_params.features   = 0;

    status = ucp_perf_test_fill_params(params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config);
    if (status != UCS_OK) {
        goto err;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = params->thread_mode;

    status = ucp_worker_create(perf->ucp.context, &worker_params,
                               &perf->ucp.worker);
    if (status != UCS_OK) {
        goto err_cleanup;
    }

    status = ucp_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        /* Fix: typo "alocate" -> "allocate" */
        ucs_warn("ucp test failed to allocate memory");
        goto err_destroy_worker;
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_mem;
    }

    return UCS_OK;

err_free_mem:
    ucp_perf_test_free_mem(perf);
err_destroy_worker:
    ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Tear down everything ucp_perf_setup() created, in reverse order of
 * creation: endpoints, buffers, worker, context.  A barrier between
 * endpoint teardown and memory release keeps remote ranks from accessing
 * buffers that are about to be unmapped. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    rte_call(perf, barrier);
    ucp_perf_test_free_mem(perf);
    ucp_worker_destroy(perf->ucp.worker);
    ucp_cleanup(perf->ucp.context);
}
/* Per-API dispatch table, indexed by the test's API selector
 * (UCX_PERF_API_UCT / UCX_PERF_API_UCP).  The run callbacks
 * (uct_perf_test_dispatch / ucp_perf_test_dispatch) are defined elsewhere. */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf, ucx_perf_params_t *params);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup, uct_perf_test_dispatch},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup, ucp_perf_test_dispatch}
};
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
/* Entry point: run a single performance test described by PARAMS and fill
 * RESULT.  Validates the command/API selection, sets up the chosen API via
 * ucx_perf_funcs, optionally performs a warmup run, then executes the
 * measured run and reports through the RTE.  In multi-threaded mode the
 * work is delegated to ucx_perf_thread_spawn(). */
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_reset(perf, params);

    status = ucx_perf_funcs[params->api].setup(perf, params);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
        if (params->warmup_iter > 0) {
            /* Warmup run: capped iterations, no reporting; all state is
             * reset again before the measured run */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            rte_call(perf, barrier);
            ucx_perf_test_reset(perf, params);
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1);
        }
    } else {
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
#include <omp.h>
/* Per-thread state for OpenMP thread mode.  Each thread works on a
 * private copy of the perf context; STATUSES points at the array shared
 * by all threads for collecting per-thread results. */
typedef struct {
    pthread_t pt;             /* NOTE(review): appears unused in this file -- confirm */
    int tid;                  /* this thread's index */
    int ntid;                 /* total number of threads */
    ucs_status_t* statuses;   /* shared array of per-thread statuses */
    ucx_perf_context_t perf;  /* private copy of the parent context */
    ucx_perf_result_t result; /* per-thread result */
} ucx_perf_thread_context_t;
/* Body executed by every OpenMP thread in thread mode.
 *
 * Fixes relative to the previous version:
 *  - '#pragma omp barrier' is issued after each thread stores its status
 *    and before any thread reads the shared statuses[] array.  The rte
 *    barrier only synchronizes processes, not threads, so the old code
 *    raced on statuses[] (and divergent views could deadlock threads on
 *    the later omp barrier).
 *  - After warmup every thread resets its OWN perf context: each thread
 *    owns a private copy (tctx->perf), so resetting only under
 *    '#pragma omp master' left all non-master counters carrying warmup
 *    state into the measured run. */
static void* ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg;
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t* statuses = tctx->statuses;
    int tid = tctx->tid;
    int i;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        statuses[tid] = ucx_perf_funcs[params->api].run(perf);
        rte_call(perf, barrier);
        /* make all statuses[] stores visible before any thread reads them */
#pragma omp barrier
        for (i = 0; i < tctx->ntid; i++) {
            if (UCS_OK != statuses[i]) {
                goto out;
            }
        }
        /* each thread resets its private context copy */
        ucx_perf_test_reset(perf, params);
    }

    /* Run test */
#pragma omp barrier
    statuses[tid] = ucx_perf_funcs[params->api].run(perf);
    rte_call(perf, barrier);
    /* synchronize threads before cross-checking statuses */
#pragma omp barrier
    for (i = 0; i < tctx->ntid; i++) {
        if (UCS_OK != statuses[i]) {
            goto out;
        }
    }
#pragma omp master
    {
        /* Assuming all threads are fairly treated, reporting only tid==0
            TODO: aggregate reports */
        ucx_perf_calc_result(perf, result);
        rte_call(perf, report, result, perf->params.report_arg, 1);
    }

out:
    return &statuses[tid];
}
/* Spawn perf->params.thread_count OpenMP threads, hand each a private
 * copy of the perf context with thread-specific buffer offsets, run the
 * test body in parallel, and fold the per-thread statuses into a single
 * return value (the last failing status wins). */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx;
    ucs_status_t* statuses;
    size_t message_size;
    ucs_status_t status;
    int i, nti;

    message_size = ucx_perf_get_message_size(&perf->params);
    omp_set_num_threads(perf->params.thread_count);
    nti = perf->params.thread_count;

    tctx     = calloc(nti, sizeof(*tctx));
    statuses = calloc(nti, sizeof(*statuses));
    if ((statuses == NULL) || (tctx == NULL)) {
        status = UCS_ERR_NO_MEMORY;
        goto out_free;
    }

#pragma omp parallel
    {
        /* declared inside the region, hence thread-private */
        int tid = omp_get_thread_num();

        tctx[tid].tid      = tid;
        tctx[tid].ntid     = nti;
        tctx[tid].statuses = statuses;
        tctx[tid].perf     = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        tctx[tid].perf.send_buffer += tid * message_size;
        tctx[tid].perf.recv_buffer += tid * message_size;
        tctx[tid].perf.offset       = tid * message_size;
        ucx_perf_thread_run_test((void*)&tctx[tid]);
    }

    status = UCS_OK;
    for (i = 0; i < nti; i++) {
        if (UCS_OK != statuses[i]) {
            ucs_error("Thread %d failed to run test: %s", tctx[i].tid,
                      ucs_status_string(statuses[i]));
            status = statuses[i];
        }
    }

out_free:
    free(statuses);
    free(tctx);
    return status;
}
#else
/* Fallback when compiled without OpenMP: thread mode cannot be supported,
 * so reject the request.  PERF and RESULT are unused. */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
lca_comms.h | /*
//@HEADER
// *****************************************************************************
//
// XtraPuLP: Xtreme-Scale Graph Partitioning using Label Propagation
// Copyright (2016) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact George M. Slota (gmslota@sandia.gov)
// Siva Rajamanickam (srajama@sandia.gov)
// Kamesh Madduri (madduri@cse.psu.edu)
//
// *****************************************************************************
//@HEADER
*/
#ifndef _LCA_COMMS_H_
#define _LCA_COMMS_H_
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include "comms.h"
#include "bicc_dist.h"
#include "util.h"
extern int procid, nprocs;
extern bool verbose, debug, verify;
#define MAX_SEND_SIZE 2147483648
#define THREAD_QUEUE_SIZE 1024
/* Per-thread staging buffers: queue insertions are batched here and
 * published to the shared lca_queue_data_t arrays in bulk, to limit
 * atomic contention. */
struct lca_thread_data_t {
  int32_t tid;                 // OpenMP thread id
  uint64_t* thread_queue;      // staged traversal tuples (THREAD_QUEUE_SIZE words)
  uint64_t* thread_finish;     // staged finish records (THREAD_QUEUE_SIZE words)
  uint64_t thread_queue_size;  // words currently staged in thread_queue
  uint64_t thread_finish_size; // words currently staged in thread_finish
};
/* Shared LCA traversal state: current frontier, next frontier, and the
 * list of finished records, with their word counts.  next_size and
 * finish_size are advanced with omp atomic captures by the flush
 * routines below. */
struct lca_queue_data_t {
  uint64_t* queue;      // current frontier
  uint64_t* queue_next; // next frontier, filled by empty_lca_queue()
  uint64_t* finish;     // finished records, filled by empty_finish_queue()
  uint64_t queue_size;
  uint64_t next_size;
  uint64_t finish_size;
};
/* Allocate the shared LCA queues, each sized 100x (n_local + n_ghost)
 * entries, and zero the counters.
 * NOTE(review): the factor 100 is an unexplained over-allocation with no
 * overflow/exhaustion guard -- confirm it bounds the worst case. */
inline void init_queue_lca(dist_graph_t* g, lca_queue_data_t* lcaq){
  if (debug) { printf("Task %d init_queue_lca() start\n", procid);}

  uint64_t queue_size = g->n_local + g->n_ghost;
  lcaq->queue = (uint64_t*)malloc(100*queue_size*sizeof(uint64_t));
  lcaq->queue_next = (uint64_t*)malloc(100*queue_size*sizeof(uint64_t));
  lcaq->finish = (uint64_t*)malloc(100*queue_size*sizeof(uint64_t));
  if (lcaq->queue == NULL || lcaq->queue_next == NULL || lcaq->finish == NULL)
    throw_err("init_queue_lca(), unable to allocate resources\n",procid);

  lcaq->queue_size = 0;
  lcaq->next_size = 0;
  lcaq->finish_size = 0;

  if(debug){printf("Task %d init_queue_lca() success\n", procid); }
}
// Release the three queue buffers obtained in init_queue_lca().
inline void clear_queue_lca(lca_queue_data_t* lcaq){
  if(debug){ printf("Task %d clear_queue_lca() start\n",procid); }

  // frees are independent; released in reverse allocation order
  free(lcaq->finish);
  free(lcaq->queue_next);
  free(lcaq->queue);

  if(debug) {printf("Task %d clear_queue_lca() success\n", procid); }
}
/* Initialize one thread's staging state: record the OpenMP thread id and
 * allocate the fixed-size queue and finish buffers (THREAD_QUEUE_SIZE
 * words each).  Fix: the original assigned lcat->tid twice; the
 * redundant second assignment is removed. */
inline void init_thread_lca(lca_thread_data_t* lcat) {
  if (debug) { printf("Task %d init_thread_queue() start\n", procid);}

  lcat->tid = omp_get_thread_num();
  lcat->thread_queue = (uint64_t*)malloc(THREAD_QUEUE_SIZE*sizeof(uint64_t));
  lcat->thread_finish = (uint64_t*)malloc(THREAD_QUEUE_SIZE*sizeof(uint64_t));
  if (lcat->thread_queue == NULL || lcat->thread_finish == NULL)
    throw_err("init_thread_lca(), unable to allocate resources\n", procid, lcat->tid);

  lcat->thread_queue_size = 0;
  lcat->thread_finish_size = 0;

  if (debug) {printf("Task %d init_thread_queue() success\n", procid); }
}
// Free the thread-local buffers allocated by init_thread_lca().
inline void clear_thread_lca(lca_thread_data_t* lcat){
  free(lcat->thread_finish);
  free(lcat->thread_queue);
}
/* Build per-rank send displacements (prefix sums of sendcounts_temp) and
 * allocate the vertex send buffer sized to the grand total. */
inline void init_sendbuf_lca(mpi_data_t* comm){
  comm->sdispls_temp[0] = 0;
  comm->total_send = comm->sendcounts_temp[0];
  for (int32_t i = 1; i < nprocs; ++i){
    comm->sdispls_temp[i] = comm->sdispls_temp[i-1] + comm->sendcounts_temp[i-1];
    comm->total_send += comm->sendcounts_temp[i];
  }

  if (debug) printf("Task %d total_send %lu\n", procid, comm->total_send);

  comm->sendbuf_vert = (uint64_t*)malloc(comm->total_send*sizeof(uint64_t));
  if (comm->sendbuf_vert == NULL)
    throw_err("init_sendbuf_lca(), unable to allocate resources\n", procid);
}
/* Free the receive buffer and reset the per-rank send counters for the
 * next communication round.  The two original loops iterated the same
 * range and are merged into one. */
inline void clear_recvbuf_lca(mpi_data_t* comm){
  free(comm->recvbuf_vert);

  for (int32_t i = 0; i < nprocs; ++i) {
    comm->sendcounts[i] = 0;
    comm->sendcounts_temp[i] = 0;
  }
}
inline void add_to_lca(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
uint64_t vert1, uint64_t pred1, uint64_t level1,
uint64_t vert2, uint64_t pred2, uint64_t level2);
inline void empty_lca_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq);
inline void add_to_finish(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
uint64_t vert1, uint64_t pred1, uint64_t level1);
inline void empty_finish_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq);
inline void update_lca_send(thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
inline void empty_lca_send(thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq);
inline void update_lca_finish(thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
//(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm,
// lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank);
inline void empty_lca_finish(thread_comm_t* tc, mpi_data_t* comm,
lca_queue_data_t* lcaq);
inline void exchange_lca(dist_graph_t* g, mpi_data_t* comm);
/* Append one LCA work item -- (vert, pred, level) for two endpoints,
 * 6 words total -- to the thread-local queue; flush to the shared queue
 * when another 6-word item might not fit. */
inline void add_to_lca(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
                       uint64_t vert1, uint64_t pred1, uint64_t level1,
                       uint64_t vert2, uint64_t pred2, uint64_t level2)
{
  lcat->thread_queue[lcat->thread_queue_size++] = vert1;
  lcat->thread_queue[lcat->thread_queue_size++] = pred1;
  lcat->thread_queue[lcat->thread_queue_size++] = level1;
  lcat->thread_queue[lcat->thread_queue_size++] = vert2;
  lcat->thread_queue[lcat->thread_queue_size++] = pred2;
  lcat->thread_queue[lcat->thread_queue_size++] = level2;

  /* flush before the next insertion could overflow the staging buffer */
  if (lcat->thread_queue_size+6 >= THREAD_QUEUE_SIZE)
    empty_lca_queue(lcat, lcaq);
}
/* Flush the thread-local queue into the shared next-frontier array.
 * The atomic capture reserves a contiguous span of queue_next by
 * advancing next_size; subtracting our contribution afterwards yields
 * the start of the reserved span. */
inline void empty_lca_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq)
{
  uint64_t start_offset;

#pragma omp atomic capture
  start_offset = lcaq->next_size += lcat->thread_queue_size;
  start_offset -= lcat->thread_queue_size;

  for (uint64_t i = 0; i < lcat->thread_queue_size; ++i)
    lcaq->queue_next[start_offset + i] = lcat->thread_queue[i];

  lcat->thread_queue_size = 0;
}
/* Append one 3-word (vert, pred, level) finish record to the
 * thread-local finish buffer; flush when another record might not fit. */
inline void add_to_finish(lca_thread_data_t* lcat, lca_queue_data_t* lcaq,
                          uint64_t vert1, uint64_t pred1, uint64_t level1)
{
  lcat->thread_finish[lcat->thread_finish_size++] = vert1;
  lcat->thread_finish[lcat->thread_finish_size++] = pred1;
  lcat->thread_finish[lcat->thread_finish_size++] = level1;

  if (lcat->thread_finish_size+3 >= THREAD_QUEUE_SIZE)
    empty_finish_queue(lcat, lcaq);
}
/* Flush the thread-local finish buffer into the shared finish array;
 * same atomic-capture reservation pattern as empty_lca_queue(). */
inline void empty_finish_queue(lca_thread_data_t* lcat, lca_queue_data_t* lcaq)
{
  uint64_t start_offset;

#pragma omp atomic capture
  start_offset = lcaq->finish_size += lcat->thread_finish_size;
  start_offset -= lcat->thread_finish_size;

  for (uint64_t i = 0; i < lcat->thread_finish_size; ++i)
    lcaq->finish[start_offset + i] = lcat->thread_finish[i];

  lcat->thread_finish_size = 0;
}
/* Stage one 6-word queue entry (starting at INDEX in queue_next) for
 * delivery to SEND_RANK.  The destination rank is recorded once per
 * entry, hence the /6 index into sendbuf_rank_thread.
 * NOTE(review): the sendcounts_thread increment is commented out here,
 * yet empty_lca_send() reserves space based on sendcounts_thread --
 * presumably the caller maintains it; confirm. */
inline void update_lca_send(thread_comm_t* tc, mpi_data_t* comm,
                            lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
{
  tc->sendbuf_rank_thread[tc->thread_queue_size/6] = send_rank;
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+1];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+2];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+3];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+4];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+5];
  //++tc->thread_queue_size;
  //++tc->sendcounts_thread[send_rank];

  /* flush before the next 6-word entry could overflow */
  if (tc->thread_queue_size+6 >= THREAD_QUEUE_SIZE)
    empty_lca_send(tc, comm, lcaq);
}
/* Drain the staged thread send buffer into the global send buffer.
 * Phase 1: for each rank, atomically reserve sendcounts_thread[i] slots
 * in comm->sendbuf_vert by advancing sdispls_temp[i] (capture pattern).
 * Phase 2: scatter each staged 6-word entry to its rank's region.
 * Phase 3: reset the per-rank thread counters. */
inline void empty_lca_send(thread_comm_t* tc, mpi_data_t* comm,
                           lca_queue_data_t* lcaq)
{
  for (int32_t i = 0; i < nprocs; ++i)
  {
#pragma omp atomic capture
    tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
    tc->thread_starts[i] -= tc->sendcounts_thread[i];
  }

  for (uint64_t i = 0; i < tc->thread_queue_size; i+=6)
  {
    int32_t cur_rank = tc->sendbuf_rank_thread[i/6];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
      tc->sendbuf_vert_thread[i];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+1] =
      tc->sendbuf_vert_thread[i+1];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+2] =
      tc->sendbuf_vert_thread[i+2];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+3] =
      tc->sendbuf_vert_thread[i+3];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+4] =
      tc->sendbuf_vert_thread[i+4];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+5] =
      tc->sendbuf_vert_thread[i+5];
    tc->thread_starts[cur_rank] += 6;
  }

  for (int32_t i = 0; i < nprocs; ++i)
  {
    tc->thread_starts[i] = 0;
    tc->sendcounts_thread[i] = 0;
  }
  tc->thread_queue_size = 0;
}
/* Stage one 3-word finish record (starting at INDEX in the finish array)
 * for delivery to SEND_RANK; destination recorded once per record (/3).
 * NOTE(review): the flush check uses +6 although records are 3 words --
 * conservative (no overflow), but inconsistent with add_to_finish();
 * confirm whether +3 was intended. */
inline void update_lca_finish(thread_comm_t* tc, mpi_data_t* comm,
                              lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
{
  // for (int32_t i = 0; i < nprocs; ++i)
  //   tc->v_to_rank[i] = false;
  // uint64_t out_degree = out_degree(g, vert_index);
  // uint64_t* outs = out_vertices(g, vert_index);
  // for (uint64_t j = 0; j < out_degree; ++j)
  // {
  //   uint64_t out_index = outs[j];
  //   if (out_index >= g->n_local)
  //   {
  //     int32_t out_rank = g->ghost_tasks[out_index - g->n_local];
  //     if (!tc->v_to_rank[out_rank])
  //     {
  //       tc->v_to_rank[out_rank] = true;
  //       add_vid_data_to_send(tc, comm,
  //         g->local_unmap[vert_index], data, out_rank);
  //     }
  //   }
  // }
  tc->sendbuf_rank_thread[tc->thread_queue_size/3] = send_rank;
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index+1];
  tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->finish[index+2];
  //++tc->thread_queue_size;
  //++tc->sendcounts_thread[send_rank];

  if (tc->thread_queue_size+6 >= THREAD_QUEUE_SIZE)
    empty_lca_finish(tc, comm, lcaq);
}
// inline void add_data_to_finish(thread_comm_t* tc, mpi_data_t* comm,
// lca_queue_data_t* lcaq, uint64_t index, int32_t send_rank)
// {
// tc->sendbuf_rank_thread[tc->thread_queue_size/3] = send_rank;
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index];
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+1];
// tc->sendbuf_vert_thread[tc->thread_queue_size++] = lcaq->queue_next[index+2];
// ++tc->thread_queue_size;
// ++tc->sendcounts_thread[send_rank];
// if (tc->thread_queue_size+3 >= THREAD_QUEUE_SIZE)
// empty_lca_finish(tc, comm, lcaq);
// }
/* Drain staged 3-word finish records into the global send buffer; same
 * reserve/scatter/reset structure as empty_lca_send(), but with 3-word
 * granularity. */
inline void empty_lca_finish(thread_comm_t* tc, mpi_data_t* comm,
                             lca_queue_data_t* lcaq)
{
  for (int32_t i = 0; i < nprocs; ++i)
  {
#pragma omp atomic capture
    tc->thread_starts[i] = comm->sdispls_temp[i] += tc->sendcounts_thread[i];
    tc->thread_starts[i] -= tc->sendcounts_thread[i];
  }

  for (uint64_t i = 0; i < tc->thread_queue_size; i+=3)
  {
    int32_t cur_rank = tc->sendbuf_rank_thread[i/3];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]] =
      tc->sendbuf_vert_thread[i];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+1] =
      tc->sendbuf_vert_thread[i+1];
    comm->sendbuf_vert[tc->thread_starts[cur_rank]+2] =
      tc->sendbuf_vert_thread[i+2];
    tc->thread_starts[cur_rank] += 3;
  }

  for (int32_t i = 0; i < nprocs; ++i)
  {
    tc->thread_starts[i] = 0;
    tc->sendcounts_thread[i] = 0;
  }
  tc->thread_queue_size = 0;
}
/* All-to-all exchange of the staged vertex data.  Send sizes are traded
 * with MPI_Alltoall, the receive buffer is allocated, and the transfer
 * is split into num_comms rounds so no single MPI_Alltoallv exceeds
 * MAX_SEND_SIZE entries globally.  Frees sendbuf_vert when done. */
inline void exchange_lca(dist_graph_t* g, mpi_data_t* comm)
{
  for (int32_t i = 0; i < nprocs; ++i)
    comm->recvcounts_temp[i] = 0;

  /* undo the atomic-capture advances so sdispls_temp again holds each
   * rank's start offset into sendbuf_vert */
  for (int32_t i = 0; i < nprocs; ++i)
    comm->sdispls_temp[i] -= comm->sendcounts_temp[i];

  MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T,
               comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD);

  comm->total_recv = 0;
  for (int i = 0; i < nprocs; ++i)
    comm->total_recv += comm->recvcounts_temp[i];

  comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t));
  if (comm->recvbuf_vert == NULL)
    throw_err("exchange_lca() unable to allocate recv buffers", procid);

  uint64_t task_queue_size = comm->total_send;
  uint64_t current_global_size = 0;
  MPI_Allreduce(&task_queue_size, &current_global_size, 1,
                MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);

  /* number of rounds needed to stay under MAX_SEND_SIZE per round */
  uint64_t num_comms = current_global_size / (uint64_t)MAX_SEND_SIZE + 1;
  uint64_t sum_recv = 0;
  uint64_t sum_send = 0;
  for (uint64_t c = 0; c < num_comms; ++c)
  {
    /* this round sends slice [send_begin, send_end) of each rank's data */
    for (int32_t i = 0; i < nprocs; ++i)
    {
      uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
      uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
      if (c == (num_comms-1))
        send_end = comm->sendcounts_temp[i];

      comm->sendcounts[i] = (int32_t)(send_end - send_begin);
      assert(comm->sendcounts[i] >= 0);
    }

    MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T,
                 comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD);

    comm->sdispls[0] = 0;
    comm->sdispls_cpy[0] = 0;
    comm->rdispls[0] = 0;
    for (int32_t i = 1; i < nprocs; ++i)
    {
      comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1];
      comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1];
      comm->sdispls_cpy[i] = comm->sdispls[i];
    }

    int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1];
    int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1];
    uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t));
    if (buf_v == NULL)
      throw_err("exchange_verts(), unable to allocate comm buffers", procid);

    /* pack this round's slice, contiguous per destination rank */
    for (int32_t i = 0; i < nprocs; ++i)
    {
      uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms;
      uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms;
      if (c == (num_comms-1))
        send_end = comm->sendcounts_temp[i];

      for (uint64_t j = send_begin; j < send_end; ++j)
      {
        uint64_t data = comm->sendbuf_vert[comm->sdispls_temp[i]+j];
        buf_v[comm->sdispls_cpy[i]++] = data;
      }
    }

    MPI_Alltoallv(buf_v, comm->sendcounts,
                  comm->sdispls, MPI_UINT64_T,
                  comm->recvbuf_vert+sum_recv, comm->recvcounts,
                  comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD);

    free(buf_v);
    sum_recv += cur_recv;
    sum_send += cur_send;
  }

  free(comm->sendbuf_vert);
  assert(sum_recv == comm->total_recv);
  assert(sum_send == comm->total_send);
}
#endif
|
local_operator.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file local_operator.h
*
* \brief Contains declaration and implementation of sirius::Local_operator class.
*/
#ifndef __LOCAL_OPERATOR_H__
#define __LOCAL_OPERATOR_H__
#include "periodic_function.h"
#ifdef __GPU
extern "C" void add_pw_ekin_gpu(int num_gvec__,
double const* pw_ekin__,
cuDoubleComplex const* vphi__,
cuDoubleComplex* hphi__);
#endif
namespace sirius {
/// Representation of the local operator.
/** The following functionality is implementated:
* - application of the local part of Hamiltonian (kinetic + potential) to the wave-fucntions in the PP-PW case
* - application of the interstitial part of H and O in the case of FP-LAPW
* - remapping of potential and unit-step functions from fine to coarse mesh of G-vectors
*/
class Local_operator
{
private:
Simulation_parameters const* param_{nullptr};
/// Coarse-grid FFT driver for this operator
FFT3D& fft_coarse_;
/// Kinetic energy of G+k plane-waves.
mdarray<double, 1> pw_ekin_;
/// Effective potential components.
mdarray<double, 2> veff_vec_;
mdarray<double_complex, 1> vphi1_;
mdarray<double_complex, 1> vphi2_;
mdarray<double, 1> theta_;
mdarray<double_complex, 1> buf_rg_;
/// V(G=0) matrix elements.
double v0_[2];
public:
/// Constructor.
/** Stores the simulation parameters and the coarse-grid FFT driver;
 *  no buffers are allocated here -- see the prepare() methods. */
Local_operator(Simulation_parameters const& param__,
               FFT3D& fft_coarse__)
    : param_(&param__)
    , fft_coarse_(fft_coarse__)
{
}
/// This constructor is used internally in the debug and performance tests only.
/** Fills the effective potential with a constant (2.71828) and zeroes the
 *  kinetic-energy array so the operator can be applied without a full
 *  simulation context.
 *  NOTE(review): param_ stays nullptr on this path -- methods that
 *  dereference it (e.g. checksum printing) must not be used on such an
 *  instance; confirm. */
Local_operator(FFT3D& fft_coarse__, Gvec const& gvec__)
    : fft_coarse_(fft_coarse__)
{
    veff_vec_ = mdarray<double, 2>(fft_coarse_.local_size(), 1, memory_t::host, "Local_operator::veff_vec_");
    /* arbitrary non-zero test value */
    for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
        veff_vec_(ir, 0) = 2.71828;
    }
    int ngv_fft = gvec__.partition().gvec_count_fft();
    pw_ekin_ = mdarray<double, 1>(ngv_fft, memory_t::host, "Local_operator::pw_ekin");
    pw_ekin_.zero();
    vphi1_ = mdarray<double_complex, 1>(ngv_fft, memory_t::host, "Local_operator::vphi1");
    vphi2_ = mdarray<double_complex, 1>(ngv_fft, memory_t::host, "Local_operator::vphi2");
#ifdef __GPU
    if (fft_coarse_.pu() == GPU) {
        veff_vec_.allocate(memory_t::device);
        veff_vec_.copy_to_device();
        pw_ekin_.allocate(memory_t::device);
        pw_ekin_.copy_to_device();
        vphi1_.allocate(memory_t::device);
        vphi2_.allocate(memory_t::device);
    }
#endif
}
/// Map effective potential and magnetic field to a coarse FFT mesh in case of PP-PW.
/** \param [in] gvec_coarse G-vectors of the coarse FFT grid.
 *  \param [in] num_mag_dims Number of magnetic dimensions.
 *  \param [in] effective_potential \f$ V_{eff}({\bf r}) \f$ on the fine grid FFT grid.
 *  \param [in] effective_magnetic_field \f$ {\bf B}_{eff}({\bf r}) \f$ on the fine FFT grid.
 *
 *  This function should be called prior to the band diagonalziation. In case of GPU execution all
 *  effective fields on the coarse grid will be copied to the device and will remain there until the
 *  dismiss() method is called after band diagonalization.
 */
inline void prepare(Gvec const& gvec_coarse__,
                    int num_mag_dims__,
                    Periodic_function<double>* effective_potential__,
                    Periodic_function<double>* effective_magnetic_field__[3])
{
    PROFILE("sirius::Local_operator::prepare");

    /* group effective fields into single vector */
    std::vector<Periodic_function<double>*> veff_vec(num_mag_dims__ + 1);
    veff_vec[0] = effective_potential__;
    for (int j = 0; j < num_mag_dims__; j++) {
        veff_vec[1 + j] = effective_magnetic_field__[j];
    }
    /* allocate only once */
    if (!veff_vec_.size()) {
        veff_vec_ = mdarray<double, 2>(fft_coarse_.local_size(), num_mag_dims__ + 1, memory_t::host, "Local_operator::veff_vec_");
    }
    /* low-frequency part of PW coefficients */
    std::vector<double_complex> v_pw_coarse(gvec_coarse__.partition().gvec_count_fft());
    /* prepare FFT for transformation */
    fft_coarse_.prepare(gvec_coarse__.partition());
    /* map components of effective potential to a corase grid */
    for (int j = 0; j < num_mag_dims__ + 1; j++) {
        /* loop over low-frequency G-vectors */
        for (int ig = 0; ig < gvec_coarse__.partition().gvec_count_fft(); ig++) {
            /* G-vector in fractional coordinates */
            auto G = gvec_coarse__.gvec(ig + gvec_coarse__.partition().gvec_offset_fft());
            v_pw_coarse[ig] = veff_vec[j]->f_pw(G);
        }
        /* transform to real space */
        fft_coarse_.transform<1>(gvec_coarse__.partition(), &v_pw_coarse[0]);
        /* save V(r) */
        fft_coarse_.output(&veff_vec_(0, j));
    }
    fft_coarse_.dismiss();

    if (num_mag_dims__) {
        /* repack (v, Bz) into the spin-diagonal combinations used when applying H */
        for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
            double v0 = veff_vec_(ir, 0);
            double v1 = veff_vec_(ir, 1);
            veff_vec_(ir, 0) = v0 + v1; // v + Bz
            veff_vec_(ir, 1) = v0 - v1; // v - Bz
        }
    }

    /* cache V(G=0) per spin channel */
    if (num_mag_dims__ == 0) {
        v0_[0] = veff_vec[0]->f_pw(0).real();
    } else {
        v0_[0] = veff_vec[0]->f_pw(0).real() + veff_vec[1]->f_pw(0).real();
        v0_[1] = veff_vec[0]->f_pw(0).real() - veff_vec[1]->f_pw(0).real();
    }

    /* copy veff to device */
#ifdef __GPU
    if (fft_coarse_.pu() == GPU) {
        veff_vec_.allocate(memory_t::device);
        veff_vec_.copy_to_device();
    }
#endif
}
/// Map effective potential and magnetic field to a coarse FFT mesh in case of FP-LAPW.
/** \param [in] gvec_coarse G-vectors of the coarse FFT grid.
 *  \param [in] num_mag_dims Number of magnetic dimensions.
 *  \param [in] effective_potential \f$ V_{eff}({\bf r}) \f$ on the fine grid FFT grid.
 *  \param [in] effective_magnetic_field \f$ {\bf B}_{eff}({\bf r}) \f$ on the fine FFT grid.
 *  \param [in] step_function Unit step function of the LAPW method.
 *
 *  Each field is multiplied by the unit-step function on the dense grid, transformed to
 *  plane-wave coefficients, truncated to the coarse G-vector set, and transformed back to
 *  the coarse real-space grid.  The step function itself is mapped the same way into theta_.
 */
inline void prepare(Gvec const& gvec_coarse__,
                    int num_mag_dims__,
                    Periodic_function<double>* effective_potential__,
                    Periodic_function<double>* effective_magnetic_field__[3],
                    Step_function const& step_function__)
{
    PROFILE("sirius::Local_operator::prepare");

    /* group effective fields into single vector */
    std::vector<Periodic_function<double>*> veff_vec(num_mag_dims__ + 1);
    veff_vec[0] = effective_potential__;
    for (int j = 0; j < num_mag_dims__; j++) {
        veff_vec[1 + j] = effective_magnetic_field__[j];
    }
    /* allocate only once */
    if (!veff_vec_.size()) {
        veff_vec_ = mdarray<double, 2>(fft_coarse_.local_size(), num_mag_dims__ + 1, memory_t::host, "Local_operator::veff_vec_");
    }
    if (!theta_.size()) {
        theta_ = mdarray<double, 1>(fft_coarse_.local_size(), memory_t::host, "Local_operator::theta_");
    }
    if (!buf_rg_.size()) {
        buf_rg_ = mdarray<double_complex, 1>(fft_coarse_.local_size(), memory_t::host, "Local_operator::buf_rg_");
    }
    auto& fft_dense = effective_potential__->fft();
    auto& gvec_dense = effective_potential__->gvec();

    mdarray<double_complex, 1> v_pw_fine(gvec_dense.num_gvec());
    /* low-frequency part of PW coefficients */
    std::vector<double_complex> v_pw_coarse(gvec_coarse__.partition().gvec_count_fft());
    /* prepare coarse-grained FFT for transformation */
    fft_coarse_.prepare(gvec_coarse__.partition());
    /* map components of effective potential to a corase grid */
    for (int j = 0; j < num_mag_dims__ + 1; j++) {
        /* multiply by the unit-step function on the dense grid */
        for (int ir = 0; ir < fft_dense.local_size(); ir++) {
            fft_dense.buffer(ir) = veff_vec[j]->f_rg(ir) * step_function__.theta_r(ir);
        }
        if (fft_dense.pu() == GPU) {
            fft_dense.buffer().copy<memory_t::host, memory_t::device>();
        }
        /* V(r)*theta(r) -> V(G) on the dense grid, gathered on all ranks */
        fft_dense.transform<-1>(gvec_dense.partition(), &v_pw_fine[gvec_dense.partition().gvec_offset_fft()]);
        fft_dense.comm().allgather(&v_pw_fine[0], gvec_dense.partition().gvec_offset_fft(),
                                   gvec_dense.partition().gvec_count_fft());
        if (j == 0) {
            v0_[0] = v_pw_fine[0].real();
        }
        /* loop over low-frequency G-vectors */
        for (int ig = 0; ig < gvec_coarse__.partition().gvec_count_fft(); ig++) {
            /* G-vector in fractional coordinates */
            auto G = gvec_coarse__.gvec(ig + gvec_coarse__.partition().gvec_offset_fft());
            v_pw_coarse[ig] = v_pw_fine[gvec_dense.index_by_gvec(G)];
        }
        fft_coarse_.transform<1>(gvec_coarse__.partition(), &v_pw_coarse[0]);
        fft_coarse_.output(&veff_vec_(0, j));
    }
    /* map unit-step function */
    for (int ig = 0; ig < gvec_coarse__.partition().gvec_count_fft(); ig++) {
        /* G-vector in fractional coordinates */
        auto G = gvec_coarse__.gvec(ig + gvec_coarse__.partition().gvec_offset_fft());
        v_pw_coarse[ig] = step_function__.theta_pw(gvec_dense.index_by_gvec(G));
    }
    fft_coarse_.transform<1>(gvec_coarse__.partition(), &v_pw_coarse[0]);
    fft_coarse_.output(&theta_(0));
    /* release FFT driver */
    fft_coarse_.dismiss();

    if (fft_coarse_.pu() == GPU) {
        veff_vec_.allocate(memory_t::device);
        veff_vec_.copy<memory_t::host, memory_t::device>();
        theta_.allocate(memory_t::device);
        theta_.copy<memory_t::host, memory_t::device>();
        buf_rg_.allocate(memory_t::device);
    }

    if (param_->control().print_checksum_) {
        auto cs = veff_vec_.checksum();
        DUMP("checksum(veff_vec): %18.10f", cs);
        auto cs1 = theta_.checksum();
        DUMP("checksum(theta): %18.10f", cs1);
    }
}
/// Prepare the k-point dependent arrays.
/** Caches |G+k|^2 / 2 for every local G-vector and (re)allocates the vphi
 *  scratch buffers; buffers are reused if already large enough. */
inline void prepare(Gvec const& gkvec__)
{
    PROFILE("sirius::Local_operator::prepare");

    int ngv_fft = gkvec__.partition().gvec_count_fft();

    /* cache kinetic energy of plane-waves */
    if (static_cast<int>(pw_ekin_.size()) < ngv_fft) {
        pw_ekin_ = mdarray<double, 1>(ngv_fft, memory_t::host, "Local_operator::pw_ekin");
    }
    for (int ig_loc = 0; ig_loc < ngv_fft; ig_loc++) {
        /* global index of G-vector */
        int ig = gkvec__.partition().gvec_offset_fft() + ig_loc;
        /* get G+k in Cartesian coordinates */
        auto gv = gkvec__.gkvec_cart(ig);
        pw_ekin_[ig_loc] = 0.5 * (gv * gv);
    }

    if (static_cast<int>(vphi1_.size()) < ngv_fft) {
        vphi1_ = mdarray<double_complex, 1>(ngv_fft, memory_t::host, "Local_operator::vphi1");
    }
    /* second buffer only needed when reduced G-vectors allow transforming
     * two real wave-functions at once */
    if (gkvec__.reduced() && static_cast<int>(vphi2_.size()) < ngv_fft) {
        vphi2_ = mdarray<double_complex, 1>(ngv_fft, memory_t::host, "Local_operator::vphi2");
    }

#ifdef __GPU
    if (fft_coarse_.pu() == GPU) {
        pw_ekin_.allocate(memory_t::device);
        pw_ekin_.copy_to_device();
        vphi1_.allocate(memory_t::device);
        if (gkvec__.reduced()) {
            vphi2_.allocate(memory_t::device);
        }
    }
#endif
}
/// Release device copies of all cached arrays; no-op in CPU-only builds.
inline void dismiss()
{
#ifdef __GPU
    veff_vec_.deallocate_on_device();
    pw_ekin_.deallocate_on_device();
    vphi1_.deallocate_on_device();
    vphi2_.deallocate_on_device();
    theta_.deallocate_on_device();
    buf_rg_.deallocate_on_device();
#endif
}
/// Apply the local part of the Hamiltonian (kinetic + effective potential) to wave-functions.
/** \param [in]     ispn__  Component of veff_vec_ (spin channel) to multiply with.
 *  \param [in,out] hphi__  Wave-functions, overwritten with H_loc * phi.
 *  \param [in]     idx0__  Index of the first wave-function to process.
 *  \param [in]     n__     Number of wave-functions.
 *
 *  Pipeline per wave-function: remap to FFT distribution, FFT to real space, multiply by
 *  veff_vec_(:, ispn__), FFT back, add pw_ekin_ * phi(G), remap back.  With reduced
 *  G-vectors, pairs of real wave-functions are transformed together. */
template <device_t data_ptr_type>
void apply_h(int ispn__, wave_functions& hphi__, int idx0__, int n__)
{
    PROFILE("sirius::Local_operator::apply_h");

    auto& gkp = hphi__.gkvec().partition();
    auto& comm_col = hphi__.gkvec().comm_ortho_fft();

    /* move PW coefficients into the column-distributed (FFT-friendly) layout */
    switch (data_ptr_type) {
        case CPU: {
            hphi__.pw_coeffs().remap_forward(gkp.gvec_fft_slab(), comm_col, n__, idx0__);
            break;
        }
        case GPU: {
            hphi__.pw_coeffs().remap_forward<memory_t::host | memory_t::device>(gkp.gvec_fft_slab(), comm_col, n__, idx0__);
            break;
        }
    }

    int first{0};
    /* if G-vectors are reduced, wave-functions are real and
     * we can transform two of them at once */
    if (gkp.reduced()) {
        int npairs = hphi__.pw_coeffs().spl_num_col().local_size() / 2;
        for (int i = 0; i < npairs; i++) {
            /* phi(G) -> phi(r) */
            fft_coarse_.transform<1, data_ptr_type>(gkp,
                                                    hphi__.pw_coeffs().extra().at<data_ptr_type>(0, 2 * i),
                                                    hphi__.pw_coeffs().extra().at<data_ptr_type>(0, 2 * i + 1));
            /* multiply by effective potential */
            if (fft_coarse_.pu() == GPU) {
#ifdef __GPU
                scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, fft_coarse_.buffer().at<GPU>(), veff_vec_.at<GPU>(0, ispn__));
#else
                TERMINATE_NO_GPU
#endif
            } else {
                #pragma omp parallel for schedule(static)
                for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
                    fft_coarse_.buffer(ir) *= veff_vec_(ir, ispn__);
                }
            }
            /* V(r)phi(r) -> [V*phi](G) */
            fft_coarse_.transform<-1, data_ptr_type>(gkp, vphi1_.at<data_ptr_type>(), vphi2_.at<data_ptr_type>());
            /* add kinetic energy */
            switch (data_ptr_type) {
                case CPU: {
                    #pragma omp parallel for schedule(static)
                    for (int ig = 0; ig < gkp.gvec_count_fft(); ig++) {
                        hphi__.pw_coeffs().extra()(ig, 2 * i) = hphi__.pw_coeffs().extra()(ig, 2 * i) * pw_ekin_[ig] + vphi1_[ig];
                        hphi__.pw_coeffs().extra()(ig, 2 * i + 1) = hphi__.pw_coeffs().extra()(ig, 2 * i + 1) * pw_ekin_[ig] + vphi2_[ig];
                    }
                    break;
                }
                case GPU: {
#ifdef __GPU
                    add_pw_ekin_gpu(gkp.gvec_count_fft(), pw_ekin_.at<GPU>(), vphi1_.at<GPU>(),
                                    hphi__.pw_coeffs().extra().at<GPU>(0, 2 * i));
                    add_pw_ekin_gpu(gkp.gvec_count_fft(), pw_ekin_.at<GPU>(), vphi2_.at<GPU>(),
                                    hphi__.pw_coeffs().extra().at<GPU>(0, 2 * i + 1));
#else
                    TERMINATE_NO_GPU
#endif
                    break;
                }
            }
        }
        /* check if we have to do last wave-function which had no pair */
        first = (hphi__.pw_coeffs().spl_num_col().local_size() % 2) ? hphi__.pw_coeffs().spl_num_col().local_size() - 1
                                                                    : hphi__.pw_coeffs().spl_num_col().local_size();
    }

    /* if we don't have G-vector reductions, first = 0 and we start a normal loop */
    for (int i = first; i < hphi__.pw_coeffs().spl_num_col().local_size(); i++) {
        /* phi(G) -> phi(r) */
        fft_coarse_.transform<1, data_ptr_type>(gkp, hphi__.pw_coeffs().extra().at<data_ptr_type>(0, i));
        /* multiply by effective potential */
        switch (fft_coarse_.pu()) {
            case CPU: {
                #pragma omp parallel for schedule(static)
                for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
                    fft_coarse_.buffer(ir) *= veff_vec_(ir, ispn__);
                }
                break;
            }
            case GPU: {
#ifdef __GPU
                scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, fft_coarse_.buffer().at<GPU>(), veff_vec_.at<GPU>(0, ispn__));
#else
                TERMINATE_NO_GPU
#endif
                break;
            }
        }
        /* V(r)phi(r) -> [V*phi](G) */
        fft_coarse_.transform<-1, data_ptr_type>(gkp, vphi1_.at<data_ptr_type>());
        /* add kinetic energy */
        switch (data_ptr_type) {
            case CPU: {
                #pragma omp parallel for schedule(static)
                for (int ig = 0; ig < gkp.gvec_count_fft(); ig++) {
                    hphi__.pw_coeffs().extra()(ig, i) = hphi__.pw_coeffs().extra()(ig, i) * pw_ekin_[ig] + vphi1_[ig];
                }
                break;
            }
            case GPU: {
#ifdef __GPU
                add_pw_ekin_gpu(gkp.gvec_count_fft(), pw_ekin_.at<GPU>(), vphi1_.at<GPU>(),
                                hphi__.pw_coeffs().extra().at<GPU>(0, i));
#else
                TERMINATE_NO_GPU
#endif
                break;
            }
        }
    }

    /* restore the original distribution of the wave-functions */
    switch (data_ptr_type) {
        case CPU: {
            hphi__.pw_coeffs().remap_backward(gkp.gvec_fft_slab(), comm_col, n__, idx0__);
            break;
        }
        case GPU: {
            hphi__.pw_coeffs().remap_backward<memory_t::host | memory_t::device>(gkp.gvec_fft_slab(), comm_col, n__, idx0__);
            break;
        }
    }
}
/// Apply the local Hamiltonian part and the overlap (step-function) operator.
/** For each locally stored column j of phi this computes
 *    ophi = Theta * phi                                   (overlap) and
 *    hphi = V*Theta*phi + 0.5 * sum_x P_x Theta P_x phi   (local H),
 *  where P_x is the x-th Cartesian component of the momentum operator and
 *  veff_vec_(:, 0) already contains V(r)*Theta(r) (multiplied in the constructor). */
void apply_h_o(Gvec_partition const& gkvec_par__,
int N__,
int n__,
wave_functions& phi__,
wave_functions& hphi__,
wave_functions& ophi__)
{
PROFILE("sirius::Local_operator::apply_h_o");
auto& comm_col = gkvec_par__.gvec().comm_ortho_fft();
fft_coarse_.prepare(gkvec_par__);
/* scratch array holding one plane-wave column of the wave-function gradient */
mdarray<double_complex, 1> buf_pw(gkvec_par__.gvec_count_fft());
#ifdef __GPU
/* plane-wave coefficients are accessed through host pointers below */
if (fft_coarse_.pu() == GPU) {
phi__.pw_coeffs().copy_to_host(N__, n__);
}
#endif
if (param_->control().print_checksum_) {
auto cs = phi__.checksum_pw(N__, n__);
if (phi__.comm().rank() == 0) {
DUMP("checksum(phi_pw): %18.10f %18.10f", cs.real(), cs.imag());
}
}
/* move phi to the FFT-friendly distribution and size the output buffers */
phi__.pw_coeffs().remap_forward(gkvec_par__.gvec_fft_slab(), comm_col, n__, N__);
hphi__.pw_coeffs().set_num_extra(gkvec_par__.gvec_count_fft(), comm_col, n__, N__);
ophi__.pw_coeffs().set_num_extra(gkvec_par__.gvec_count_fft(), comm_col, n__, N__);
for (int j = 0; j < phi__.pw_coeffs().spl_num_col().local_size(); j++) {
if (fft_coarse_.pu() == CPU) {
/* phi(G) -> phi(r) */
fft_coarse_.transform<1>(gkvec_par__, phi__.pw_coeffs().extra().at<CPU>(0, j));
#pragma omp parallel for schedule(static)
for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
/* save phi(r) */
buf_rg_[ir] = fft_coarse_.buffer(ir);
/* multiply by step function */
fft_coarse_.buffer(ir) *= theta_[ir];
}
/* phi(r) * Theta(r) -> ophi(G) */
fft_coarse_.transform<-1>(gkvec_par__, ophi__.pw_coeffs().extra().at<CPU>(0, j));
#pragma omp parallel for schedule(static)
for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
/* multiply by effective potential, which itself was multiplied by the step function in constructor */
fft_coarse_.buffer(ir) = buf_rg_[ir] * veff_vec_(ir, 0);
}
/* phi(r) * Theta(r) * V(r) -> hphi(G) */
fft_coarse_.transform<-1>(gkvec_par__, hphi__.pw_coeffs().extra().at<CPU>(0, j));
}
#ifdef __GPU
if (fft_coarse_.pu() == GPU) {
/* phi(G) -> phi(r) */
fft_coarse_.transform<1>(gkvec_par__, phi__.pw_coeffs().extra().at<CPU>(0, j));
/* save phi(r) */
acc::copy(buf_rg_.at<GPU>(), fft_coarse_.buffer().at<GPU>(), fft_coarse_.local_size());
/* multiply by step function */
scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, fft_coarse_.buffer().at<GPU>(), theta_.at<GPU>());
/* phi(r) * Theta(r) -> ophi(G) */
fft_coarse_.transform<-1>(gkvec_par__, ophi__.pw_coeffs().extra().at<CPU>(0, j));
/* multiply by effective potential */
scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, buf_rg_.at<GPU>(), veff_vec_.at<GPU>());
/* copy phi(r) * Theta(r) * V(r) to GPU buffer */
acc::copy(fft_coarse_.buffer().at<GPU>(), buf_rg_.at<GPU>(), fft_coarse_.local_size());
/* phi(r) * Theta(r) * V(r) -> hphi(G) */
fft_coarse_.transform<-1>(gkvec_par__, hphi__.pw_coeffs().extra().at<CPU>(0, j));
}
#endif
/* add kinetic energy 0.5 * P Theta P, one Cartesian component at a time */
for (int x: {0, 1, 2}) {
for (int igloc = 0; igloc < gkvec_par__.gvec_count_fft(); igloc++) {
/* global index of G-vector */
int ig = gkvec_par__.gvec_offset_fft() + igloc;
/* \hat P phi = phi(G+k) * (G+k), \hat P is momentum operator */
buf_pw[igloc] = phi__.pw_coeffs().extra()(igloc, j) * gkvec_par__.gvec().gkvec_cart(ig)[x];
}
/* transform Cartesian component of wave-function gradient to real space */
fft_coarse_.transform<1>(gkvec_par__, &buf_pw[0]);
switch (fft_coarse_.pu()) {
case CPU: {
#pragma omp parallel for schedule(static)
for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
/* multiply by step function */
fft_coarse_.buffer(ir) *= theta_[ir];
}
break;
}
case GPU: {
#ifdef __GPU
/* multiply by step function */
scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, fft_coarse_.buffer().at<GPU>(), theta_.at<GPU>());
#endif
break;
}
}
/* transform back to PW domain */
fft_coarse_.transform<-1>(gkvec_par__, &buf_pw[0]);
for (int igloc = 0; igloc < gkvec_par__.gvec_count_fft(); igloc++) {
int ig = gkvec_par__.gvec_offset_fft() + igloc;
/* accumulate 0.5 * (G+k)_x Theta (G+k)_x phi into hphi */
hphi__.pw_coeffs().extra()(igloc, j) += 0.5 * buf_pw[igloc] * gkvec_par__.gvec().gkvec_cart(ig)[x];
}
}
}
/* return results to the original (slab) distribution */
hphi__.pw_coeffs().remap_backward(gkvec_par__.gvec_fft_slab(), comm_col, n__, N__);
ophi__.pw_coeffs().remap_backward(gkvec_par__.gvec_fft_slab(), comm_col, n__, N__);
fft_coarse_.dismiss();
#ifdef __GPU
if (fft_coarse_.pu() == GPU) {
hphi__.pw_coeffs().copy_to_device(N__, n__);
ophi__.pw_coeffs().copy_to_device(N__, n__);
}
#endif
if (param_->control().print_checksum_) {
auto cs1 = hphi__.checksum_pw(N__, n__);
auto cs2 = ophi__.checksum_pw(N__, n__);
if (phi__.comm().rank() == 0) {
DUMP("checksum(hphi_pw): %18.10f %18.10f", cs1.real(), cs1.imag());
DUMP("checksum(ophi_pw): %18.10f %18.10f", cs2.real(), cs2.imag());
}
}
}
/// Apply the overlap operator (unit step function Theta) to wave-functions:
/// for every locally stored column, ophi = FFT^{-1}[ Theta(r) * FFT[phi] ].
void apply_o(Gvec_partition const& gkvec_par__,
             int N__,
             int n__,
             wave_functions& phi__,
             wave_functions& ophi__) const
{
    PROFILE("sirius::Local_operator::apply_o");

    /* column communicator of the FFT-friendly distribution */
    auto& col_comm = gkvec_par__.gvec().comm_ortho_fft();

    fft_coarse_.prepare(gkvec_par__);

#ifdef __GPU
    /* the FFT driver below reads host pointers; sync wave-functions first */
    if (fft_coarse_.pu() == GPU) {
        phi__.pw_coeffs().copy_to_host(N__, n__);
    }
#endif

    if (param_->control().print_checksum_) {
        auto cs_in = phi__.checksum_pw(N__, n__);
        DUMP("checksum(phi): %18.10f %18.10f", cs_in.real(), cs_in.imag());
    }

    /* redistribute phi into the FFT-slab layout and size the ophi buffer */
    phi__.pw_coeffs().remap_forward(gkvec_par__.gvec_fft_slab(), col_comm, n__, N__);
    ophi__.pw_coeffs().set_num_extra(gkvec_par__.gvec_count_fft(), col_comm, n__, N__);

    for (int jloc = 0; jloc < phi__.pw_coeffs().spl_num_col().local_size(); jloc++) {
        if (fft_coarse_.pu() == CPU) {
            /* phi(G) -> phi(r) */
            fft_coarse_.transform<1>(gkvec_par__, phi__.pw_coeffs().extra().at<CPU>(0, jloc));
            /* apply the unit step function in real space */
            #pragma omp parallel for schedule(static)
            for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
                fft_coarse_.buffer(ir) *= theta_[ir];
            }
            /* phi(r) * Theta(r) -> ophi(G) */
            fft_coarse_.transform<-1>(gkvec_par__, ophi__.pw_coeffs().extra().at<CPU>(0, jloc));
        } else {
#ifdef __GPU
            /* identical sequence executed on the device */
            fft_coarse_.transform<1>(gkvec_par__, phi__.pw_coeffs().extra().at<CPU>(0, jloc));
            scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, fft_coarse_.buffer().at<GPU>(), theta_.at<GPU>());
            fft_coarse_.transform<-1>(gkvec_par__, ophi__.pw_coeffs().extra().at<CPU>(0, jloc));
#else
            TERMINATE_NO_GPU
#endif
        }
    }

    /* bring ophi back to the original distribution */
    ophi__.pw_coeffs().remap_backward(gkvec_par__.gvec_fft_slab(), col_comm, n__, N__);

    fft_coarse_.dismiss();

#ifdef __GPU
    if (fft_coarse_.pu() == GPU) {
        ophi__.pw_coeffs().copy_to_device(N__, n__);
    }
#endif

    if (param_->control().print_checksum_) {
        auto cs_out = ophi__.checksum_pw(N__, n__);
        DUMP("checksum(ophi_istl): %18.10f %18.10f", cs_out.real(), cs_out.imag());
    }
}
/// Apply magnetic field to the wave-functions.
/** In case of collinear magnetism only Bz is applied to <tt>phi</tt> and stored in the first component of
* <tt>bphi</tt>. In case of non-collinear magnetism Bx-iBy is also applied and stored in the third
* component of <tt>bphi</tt>. The second component of <tt>bphi</tt> is used to store -Bz|phi>. */
void apply_b(Gvec_partition const& gkvec_par__,
int N__,
int n__,
wave_functions& phi__,
std::vector<wave_functions>& bphi__)
{
PROFILE("sirius::Local_operator::apply_b");
auto& comm_col = gkvec_par__.gvec().comm_ortho_fft();
fft_coarse_.prepare(gkvec_par__);
//#ifdef __GPU
//if (fft_coarse_.pu() == GPU) {
// phi__.pw_coeffs().copy_to_host(N__, n__);
//}
//#endif
/* components of H|psi> to which H is applied */
std::vector<int> iv(1, 0);
if (bphi__.size() == 3) {
iv.push_back(2);
}
/* move phi to the FFT-friendly distribution and size the output buffers */
phi__.pw_coeffs().remap_forward(gkvec_par__.gvec_fft_slab(), comm_col, n__, N__);
for (int i: iv) {
bphi__[i].pw_coeffs().set_num_extra(gkvec_par__.gvec_count_fft(), comm_col, n__, N__);
}
for (int j = 0; j < phi__.pw_coeffs().spl_num_col().local_size(); j++) {
if (fft_coarse_.pu() == CPU) {
/* phi(G) -> phi(r) */
fft_coarse_.transform<1>(gkvec_par__, phi__.pw_coeffs().extra().at<CPU>(0, j));
/* save phi(r), needed later for the Bx-iBy product */
if (bphi__.size() == 3) {
fft_coarse_.output(buf_rg_.at<CPU>());
}
#pragma omp parallel for schedule(static)
for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
/* multiply by Bz */
fft_coarse_.buffer(ir) *= veff_vec_(ir, 1);
}
/* phi(r) * Bz(r) -> bphi[0](G) */
fft_coarse_.transform<-1>(gkvec_par__, bphi__[0].pw_coeffs().extra().at<CPU>(0, j));
/* non-collinear case */
if (bphi__.size() == 3) {
#pragma omp parallel for schedule(static)
for (int ir = 0; ir < fft_coarse_.local_size(); ir++) {
/* multiply by Bx-iBy */
fft_coarse_.buffer(ir) = buf_rg_[ir] * double_complex(veff_vec_(ir, 2), -veff_vec_(ir, 3));
}
/* phi(r) * (Bx(r)-iBy(r)) -> bphi[2](G) */
fft_coarse_.transform<-1>(gkvec_par__, bphi__[2].pw_coeffs().extra().at<CPU>(0, j));
}
} else {
/* NOTE(review): this GPU branch implements only the collinear (Bz) part;
   bphi__[2] is never filled here even when bphi__.size() == 3 -- confirm
   the non-collinear case always runs with fft_coarse_.pu() == CPU */
#ifdef __GPU
/* phi(G) -> phi(r) */
fft_coarse_.transform<1>(gkvec_par__, phi__.pw_coeffs().extra().at<CPU>(0, j));
/* multiply by Bz */
scale_matrix_rows_gpu(fft_coarse_.local_size(), 1, fft_coarse_.buffer().at<GPU>(), veff_vec_.at<GPU>(0, 1));
/* phi(r) * Bz(r) -> bphi[0](G) */
fft_coarse_.transform<-1>(gkvec_par__, bphi__[0].pw_coeffs().extra().at<CPU>(0, j));
#else
TERMINATE_NO_GPU
#endif
}
}
/* return results to the original distribution */
for (int i: iv) {
bphi__[i].pw_coeffs().remap_backward(gkvec_par__.gvec_fft_slab(), comm_col, n__, N__);
//#ifdef __GPU
//if (fft_coarse_.pu() == GPU) {
// bphi__[i].pw_coeffs().copy_to_device(N__, n__);
//}
//#endif
}
fft_coarse_.dismiss();
}
/// Return the stored v0 value for spin channel \a ispn__.
/** NOTE(review): presumably the G=0 (cell-averaged) component of the effective
 *  potential -- confirm against where v0_ is filled. */
inline double v0(int ispn__) const
{
return v0_[ispn__];
}
};
} // namespace
#endif // __LOCAL_OPERATOR_H__
|
GB_unaryop__identity_int32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int32_int32
// op(A') function: GB_tran__identity_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
// entry type of the input matrix A
#define GB_ATYPE \
int32_t
// entry type of the output matrix C
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// addressing a single entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) Ax [p] for all anz entries, split statically across
// nthreads OpenMP threads.  (Auto-generated template instantiation.)
GrB_Info GB_unop__identity_int32_int32
(
int32_t *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic implementation
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/apply work is done by the included
// template, compiled with this file's GB_* macro definitions above.
GrB_Info GB_tran__identity_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
l7_push_setup.c | /*
* Copyright (c) 2011-2019, Triad National Security, LLC.
* All rights Reserved.
*
* CLAMR -- LA-CC-11-094
*
* Copyright 2011-2019. Triad National Security, LLC. This software was produced
* under U.S. Government contract 89233218CNA000001 for Los Alamos National
* Laboratory (LANL), which is operated by Triad National Security, LLC
* for the U.S. Department of Energy. The U.S. Government has rights to use,
* reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
* TRIAD NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
* ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified
* to produce derivative works, such modified software should be clearly marked,
* so as not to confuse it with the version available from LANL.
*
* Additionally, redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Triad National Security, LLC, Los Alamos
* National Laboratory, LANL, the U.S. Government, nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE TRIAD NATIONAL SECURITY, LLC AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TRIAD NATIONAL
* SECURITY, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include "l7.h"
#include "l7p.h"
#define L7_LOCATION "L7_PUSH_SETUP"
int L7_Push_Setup(
      const int num_comm_partners,
      const int *comm_partner,
      const int *send_buffer_count,
      int **send_database,
      int *receive_count_total,
      int *l7_push_id
      )
{
   /* Purpose
    * =======
    * L7_Push_Setup sets up the update/scatter database for the case in
    * which senders know which data to send and to what process.  Each
    * process passes in the data it needs to send in send_database, a
    * ragged-right array whose first index runs over num_comm_partners and
    * whose second index runs over send_buffer_count[partner] entries
    * (the indices to send).  From this, a communication pattern and
    * buffers are set up that allow subsequent calls to L7_Push_Update.
    *
    * Arguments
    * =========
    * num_comm_partners   (input)  number of processes this rank sends to.
    *
    * comm_partner        (input)  array [num_comm_partners] of partner ranks.
    *
    * send_buffer_count   (input)  array [num_comm_partners]; number of
    *                              indices sent to each partner.
    *
    * send_database       (input)  ragged array [num_comm_partners][count];
    *                              the indices to send to each partner.
    *
    * receive_count_total (output) total number of entries this rank will
    *                              receive across all partners.
    *
    * l7_push_id          (input/output) handle to the database:
    *                                0: L7 sets up a new database and assigns
    *                                   the handle a value.
    *                              > 0: L7 resets an existing database with
    *                                   the input information (reuses the
    *                                   allocated memory).
    *                              < 0: an error is returned.
    *
    * Notes
    * =====
    * 1) Indices are handled as 4-byte integers.
    * 2) Serial (non-MPI) compilation creates a no-op.
    */

   /*
    * Local variables.
    */
   int ierr;        /* Error code for return */

#ifdef HAVE_MPI
   l7_push_id_database *l7_push_id_db;

   /*
    * Executable Statements
    */

   if (! l7.mpi_initialized){
      return(0);
   }

   if (l7.initialized != 1){
      ierr = -1;
      L7_ASSERT( l7.initialized == 1, "L7 not initialized", ierr);
   }

   /*
    * Check input.
    */
   if (num_comm_partners < 0){
      ierr = -1;
      L7_ASSERT( num_comm_partners >= 0, "num_comm_partners < 0", ierr);
   }

   if (*l7_push_id < 0){
      ierr = *l7_push_id;
      L7_ASSERT( *l7_push_id >=0,
            "L7 Push Id must be either 0 (new id) or > 0 (existing id)",
            ierr);
   }

   /*
    * Setup database structure.
    */
   if (*l7_push_id != 0){
      /*
       * Find the existing database and update it based on the new input.
       */
      if (l7.first_push_db == NULL){
         ierr = -1;  /* BUGFIX: ierr was previously read uninitialized here */
         L7_ASSERT(l7.first_push_db != NULL,
               "Uninitialized l7_push_id input, but no ids in database",
               ierr);
      }

      l7_push_id_db = l7.first_push_db;
      while (l7_push_id_db){
         if (l7_push_id_db->l7_push_id == *l7_push_id)
            break;
         l7_push_id_db = l7_push_id_db->next_push_db;
      }

      /* BUGFIX: the original re-tested l7.first_push_db (always non-NULL at
       * this point) instead of the search result, so an id that was not in
       * the list fell through and the NULL l7_push_id_db was dereferenced
       * below. */
      if (l7_push_id_db == NULL){
         ierr = -1;
         L7_ASSERT( l7_push_id_db != NULL,
               "Uninitialized l7_push_id input, but not found in this list",
               ierr);
      }
   }
   else{
      /*
       * Allocate a new database and insert it into the linked list.
       */
      if (l7.num_push_dbs >= L7_MAX_NUM_DBS){
         ierr = -1;
         L7_ASSERT(l7.num_push_dbs < L7_MAX_NUM_DBS,
               "Too many L7 databases allocated",
               ierr);
      }

      l7_push_id_db = (l7_push_id_database*)calloc(1L, sizeof(l7_push_id_database) );
      if (l7_push_id_db == NULL){
         ierr = -1;
         L7_ASSERT( l7_push_id_db != NULL, "Failed to allocate new database",
               ierr);
      }

      if ( !(l7.first_push_db) ){
         /* First database: becomes both head and tail of the list. */
         l7.first_push_db = l7_push_id_db;
         l7.last_push_db  = l7_push_id_db;
         l7_push_id_db->next_push_db = NULL; /* Paranoia */
         l7_push_id_db->l7_push_id = 1;
         l7.num_push_dbs = 1;
      }
      else{
         /*
          * Assign the next l7_push_id and append to the list.
          */
         l7_push_id_db->l7_push_id = l7.last_push_db->l7_push_id + 1;
         l7.last_push_db->next_push_db = l7_push_id_db;
         l7.last_push_db = l7_push_id_db;
         l7.num_push_dbs++;
      }

      *l7_push_id = l7_push_id_db->l7_push_id;
   }

   /*
    * Allocate arrays and store input in the database.
    *
    * NOTE(review): the arrays are reallocated only when the number of
    * partners grows; when the partner count is unchanged but a
    * send_buffer_count[ip] is larger than at the previous setup, the
    * per-partner arrays are NOT resized -- confirm that callers never grow
    * per-partner counts on reuse without also growing num_comm_partners.
    */
   if (l7_push_id_db->num_comm_partners < num_comm_partners){
      // comm_partner
      if (l7_push_id_db->comm_partner)
         free(l7_push_id_db->comm_partner);
      l7_push_id_db->comm_partner = (int *) calloc(num_comm_partners,sizeof(int));
      if (l7_push_id_db->comm_partner == NULL){
         ierr = -1;
         L7_ASSERT( (int*)(l7_push_id_db->comm_partner) != NULL,
               "Memory failure for comm_partner",
               ierr);
      }
      // send_buffer_count
      if (l7_push_id_db->send_buffer_count)
         free(l7_push_id_db->send_buffer_count);
      l7_push_id_db->send_buffer_count = (int *) calloc(num_comm_partners,sizeof(int));
      if (l7_push_id_db->send_buffer_count == NULL){
         ierr = -1;
         L7_ASSERT( (int*)(l7_push_id_db->send_buffer_count) != NULL,
               "Memory failure for send_buffer_count",
               ierr);
      }
      // recv_buffer_count
      if (l7_push_id_db->recv_buffer_count)
         free(l7_push_id_db->recv_buffer_count);
      l7_push_id_db->recv_buffer_count = (int *) calloc(num_comm_partners,sizeof(int));
      if (l7_push_id_db->recv_buffer_count == NULL){
         ierr = -1;
         L7_ASSERT( (int*)(l7_push_id_db->recv_buffer_count) != NULL,
               "Memory failure for recv_buffer_count",
               ierr);
      }
      // send_database (ragged: free the old rows, then the row-pointer array)
      if (l7_push_id_db->send_database){
         /* BUGFIX: free the OLD number of rows; the original iterated over the
          * new (larger) num_comm_partners, reading past the old allocation. */
         for (int ip = 0; ip < l7_push_id_db->num_comm_partners; ip++){
            if (l7_push_id_db->send_database[ip]) free(l7_push_id_db->send_database[ip]);
         }
         free(l7_push_id_db->send_database);
      }
      l7_push_id_db->send_database = (int **) calloc(num_comm_partners,sizeof(int *));
      if (l7_push_id_db->send_database == NULL){
         ierr = -1;
         L7_ASSERT( (int*)(l7_push_id_db->send_database) != NULL,
               "Memory failure for send_database",
               ierr);
      }
      for (int ip = 0; ip < num_comm_partners; ip++){
         l7_push_id_db->send_database[ip] = (int *) calloc(send_buffer_count[ip],sizeof(int));
         if (l7_push_id_db->send_database[ip] == NULL){
            ierr = -1;
            L7_ASSERT( (int*)(l7_push_id_db->send_database[ip]) != NULL,
                  "Memory failure for send_database",
                  ierr);
         }
      }
      // send_buffer (ragged, same layout as send_database)
      if (l7_push_id_db->send_buffer){
         /* BUGFIX: same old-count fix as for send_database above. */
         for (int ip = 0; ip < l7_push_id_db->num_comm_partners; ip++){
            if (l7_push_id_db->send_buffer[ip]) free(l7_push_id_db->send_buffer[ip]);
         }
         free(l7_push_id_db->send_buffer);
      }
      l7_push_id_db->send_buffer = (int **) calloc(num_comm_partners,sizeof(int *));
      if (l7_push_id_db->send_buffer == NULL){
         ierr = -1;
         L7_ASSERT( (int*)(l7_push_id_db->send_buffer) != NULL,
               "Memory failure for send_buffer",
               ierr);
      }
      for (int ip = 0; ip < num_comm_partners; ip++){
         l7_push_id_db->send_buffer[ip] = (int *) calloc(send_buffer_count[ip],sizeof(int));
         if (l7_push_id_db->send_buffer[ip] == NULL){
            ierr = -1;
            L7_ASSERT( (int*)(l7_push_id_db->send_buffer[ip]) != NULL,
                  "Memory failure for send_buffer",
                  ierr);
         }
      }
   }

   /*
    * Copy input data into the database.
    */
   l7_push_id_db->num_comm_partners = num_comm_partners;

#pragma omp simd
   for (int ip = 0; ip < num_comm_partners; ip++){
      l7_push_id_db->comm_partner[ip]      = comm_partner[ip];
      l7_push_id_db->send_buffer_count[ip] = send_buffer_count[ip];
   }

   for (int ip = 0; ip < num_comm_partners; ip++){
      int count = send_buffer_count[ip]; // create simple int count to help vectorization
#pragma omp simd
      for (int ic = 0; ic < count; ic++){
         l7_push_id_db->send_database[ip][ic] = send_database[ip][ic];
      }
   }

   /*
    * Exchange send counts with all partners to obtain the receive counts.
    * NOTE(review): the VLAs below have zero length when num_comm_partners
    * is 0, which is undefined behavior -- confirm callers guarantee at
    * least one partner, or guard this section.
    */
   MPI_Request request[2*num_comm_partners];
   MPI_Status  status[2*num_comm_partners];

   for (int ip = 0; ip < num_comm_partners; ip++){
      MPI_Irecv(&l7_push_id_db->recv_buffer_count[ip], 1, MPI_INT, l7_push_id_db->comm_partner[ip],
            l7_push_id_db->comm_partner[ip], MPI_COMM_WORLD, &request[ip]);
   }
   for (int ip = 0; ip < num_comm_partners; ip++){
      MPI_Isend(&l7_push_id_db->send_buffer_count[ip], 1, MPI_INT, l7_push_id_db->comm_partner[ip],
            l7.penum, MPI_COMM_WORLD, &request[num_comm_partners+ip]);
   }
   MPI_Waitall(2*num_comm_partners, request, status);

   /*
    * Calculate sum of receives.
    */
   *receive_count_total = 0;
   for (int ip = 0; ip < num_comm_partners; ip++){
      *receive_count_total += l7_push_id_db->recv_buffer_count[ip];
   }
   l7_push_id_db->receive_count_total = *receive_count_total;

#endif /* HAVE_MPI */

   ierr = L7_OK;

   return(ierr);

} /* End L7_Push_Setup */
|
residualbased_elimination_builder_and_solver_componentwise.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE
/* System includes */
#include <set>
/* External includes */
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "includes/global_pointer_variables.h"
#include "utilities/builtin_timer.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
This is a specialization of the standard buliding strategy to the case in which a single variable is to be used in the
building.
the creation of the DofList and the construction of the system matrix is in this case much faster
as the neighborhood relationships are considered to be known
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace ,
class TLinearSolver,
class TVariableType
>
class ResidualBasedEliminationBuilderAndSolverComponentwise
: public ResidualBasedEliminationBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationBuilderAndSolverComponentwise );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> ResidualBasedEliminationBuilderAndSolverType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor taking a Parameters object.
* @param pNewLinearSystemSolver linear solver used by the base builder-and-solver
* @param ThisParameters settings; "components_wise_variable" names the scalar
*        variable (or vector component) whose DOFs are assembled by this class
*/
explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
{
// Validate against default parameters, then resolve the variable by name
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedEliminationBuilderAndSolverComponentwise",
"components_wise_variable" : "SCALAR_VARIABLE_OR_COMPONENT"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
rVar = KratosComponents<TVariableType>::Get(ThisParameters["components_wise_variable"].GetString());
}
/**
* @brief Constructor taking the variable directly.
* @param pNewLinearSystemSolver linear solver used by the base builder-and-solver
* @param Var the scalar variable (or vector component) to build the system for
*/
explicit ResidualBasedEliminationBuilderAndSolverComponentwise(
typename TLinearSolver::Pointer pNewLinearSystemSolver,TVariableType const& Var)
: ResidualBasedEliminationBuilderAndSolverType(pNewLinearSystemSolver)
, rVar(Var)
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor. Empty body: cleanup is handled by member/base destructors.
*/
~ResidualBasedEliminationBuilderAndSolverComponentwise() override {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
/**
* @brief Assembles the global system matrix A and RHS vector b in parallel.
* @details Elements and conditions are split into one contiguous partition per
* thread; each thread computes local contributions and assembles them into the
* global system. Equation ids are obtained from the single variable rVar.
* @param pScheme scheme used by derived classes (must be non-null)
* @param r_model_part model part providing elements, conditions and nodes
* @param A global system (LHS) matrix, assembled in place
* @param b global RHS vector, assembled in place
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b) override
{
KRATOS_TRY
if(!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//create a partition of the element array
int number_of_threads = ParallelUtilities::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
//NOTE(review): lock_array exists only under _OPENMP but is referenced in the
//USE_LOCKS_IN_ASSEMBLY branches below -- confirm that macro implies _OPENMP
std::vector< omp_lock_t > lock_array(A.size1());
for(int i = 0; i<A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel()>0)
{
KRATOS_WATCH( number_of_threads );
KRATOS_WATCH( element_partition );
}
const auto timer = BuiltinTimer();
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];
//DOF position of rVar, taken from the first node and reused for all
//(assumes a uniform DOF layout across nodes -- TODO confirm)
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);
Geometry< Node<3> >& geom = (*it)->GetGeometry();
if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
for(unsigned int i=0; i<geom.size(); i++)
EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
//assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
}
}
DenseVector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for firstprivate(number_of_threads) schedule(static,1)
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(rVar);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
(*it)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,CurrentProcessInfo);
Geometry< Node<3> >& geom = (*it)->GetGeometry();
if(EquationId.size() != geom.size()) EquationId.resize(geom.size(),false);
for(unsigned int i=0; i<geom.size(); i++)
{
EquationId[i] = geom[i].GetDof(rVar,pos).EquationId();
}
#ifdef USE_LOCKS_IN_ASSEMBLY
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId,lock_array);
#else
this->Assemble(A,b,LHS_Contribution,RHS_Contribution,EquationId);
#endif
}
}
if (this->GetEchoLevel()>0) {
std::cout << "parallel building time: " << timer.ElapsedSeconds() << std::endl;
}
#ifdef _OPENMP
for(int i = 0; i<A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Collects the "active" nodes of the model part (nodes having at least one
 * entry in NEIGHBOUR_NODES) into mActiveNodes and builds BaseType::mDofSet
 * with one dof (rVar) per active node.
 *
 * @param pScheme       scheme pointer (unused here; kept for the base interface)
 * @param r_model_part  model part whose nodes are scanned
 *
 * Side effects: fills mActiveNodes and BaseType::mDofSet and sets
 * BaseType::mDofSetIsInitialized. Throws if no degree of freedom is found.
 */
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part
) override
{
KRATOS_TRY
//fills a list of "active" nodes defined as nodes which have neighbours
// AND no fixed pressure
mActiveNodes.clear();
mActiveNodes.reserve(r_model_part.Nodes().size() );
for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it)
{
if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 )
{
mActiveNodes.push_back(*(it.base() ));
}
}
//fills the DofList and give a unique progressive tag to each node
BaseType::mDofSet.clear();
BaseType::mDofSet.reserve(mActiveNodes.size() );
for(GlobalPointersVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++)
{
BaseType::mDofSet.push_back( iii->pGetDof(rVar) );
}
//throws an exception if there are no Degrees of freedom involved in the analysis
if (BaseType::mDofSet.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag())
{
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
<< "Node : "<<dof_iterator->Id()<< std::endl
<< "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
}
}
#endif
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
 * Ensures the system matrix A, the update vector Dx, the RHS vector b and
 * the reactions vector exist and have the sizes required by the current
 * equation system, (re)building the sparsity graph of A when needed.
 *
 * Fixes applied:
 *  - removed the resize/graph-reconstruction code that followed the
 *    KRATOS_ERROR on a system-size change: KRATOS_ERROR throws, so those
 *    statements were unreachable dead code;
 *  - the KRATOS_TRY is now guarded by the same #ifndef __SUNPRO_CC as the
 *    KRATOS_CATCH below — previously a SunPro build would have had an
 *    unmatched `try` block.
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
) override
{
#ifndef __SUNPRO_CC
    KRATOS_TRY
#endif
    // Lazily create empty containers so the references below are always valid.
    if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
    {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) );
        pA.swap(pNewA);
    }
    if(pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
    {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) );
        pDx.swap(pNewDx);
    }
    if(pb == NULL) //if the pointer is not initialized initialize it to an empty vector
    {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) );
        pb.swap(pNewb);
    }
    if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty vector
    {
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) );
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }
    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;

    //resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
    {
        A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false);
#ifdef _OPENMP
        ParallelConstructGraph(A);
#else
        ConstructGraph(A);
#endif
    }
    else if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
    {
        // A change of the equation system size during the simulation is not
        // supported by this builder: abort with a clear message.
        KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
    }

    if (Dx.size() != BaseType::mEquationSystemSize) {
        Dx.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(Dx);
    if (b.size() != BaseType::mEquationSystemSize) {
        b.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(b);

    //if needed resize the vector for the calculation of reactions
    if(BaseType::mCalculateReactionsFlag == true)
    {
        unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
        if(BaseType::mpReactionsVector->size() != ReactionsVectorSize)
            BaseType::mpReactionsVector->resize(ReactionsVectorSize,false);
    }
#ifndef __SUNPRO_CC
    KRATOS_CATCH("")
#endif
}
//**************************************************************************
//**************************************************************************
/// Resets the DofSet to an empty container and clears the reactions
/// vector (if it was ever allocated), reporting on high echo levels.
void Clear() override
{
    // Drop all dofs by replacing the set with a freshly constructed one.
    this->mDofSet = DofsArrayType();

    // Release the reactions vector storage when present.
    if (this->mpReactionsVector != NULL)
        TSparseSpace::Clear( (this->mpReactionsVector) );

    if (this->GetEchoLevel() > 1)
        KRATOS_WATCH("ResidualBasedEliminationBuilderAndSolver Clear Function called");
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
///@}
///@name Input and output
///@{
/// Turn back information as a string (the class name).
std::string Info() const override
{
return "ResidualBasedEliminationBuilderAndSolverComponentwise";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
// NOTE(review): only the class name is printed; no member data is emitted.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
/**
 * Builds the sparsity graph of A sequentially. For every active node with
 * a free (non-fixed) rVar dof, the row index list is assembled from the
 * dof itself plus every free first neighbour, then sorted and
 * deduplicated. Finally the entries are pushed into A as zeros.
 *
 * @param A  system matrix to receive the graph (entries set to 0.0)
 */
void ConstructGraph(TSystemMatrixType& A)
{
KRATOS_TRY
std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);
int total_size = 0;
unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);
//constructing the system matrix row by row
int index_i;
for(GlobalPointersVector< Node<3> >::iterator in = mActiveNodes.begin();
in!=mActiveNodes.end(); in++)
{
const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
if( current_dof.IsFixed() == false)
{
index_i = (current_dof).EquationId();
GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
std::vector<int>& indices = index_list[index_i];
indices.reserve(neighb_nodes.size()+1);
//filling the first neighbours list (diagonal entry first)
indices.push_back(index_i);
for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin();
i != neighb_nodes.end(); i++)
{
const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
if(neighb_dof.IsFixed() == false )
{
int index_j = (neighb_dof).EquationId();
indices.push_back(index_j);
}
}
//sorting the indices and eliminating the duplicates
std::sort(indices.begin(),indices.end());
typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
indices.erase(new_end,indices.end());
total_size += indices.size();
}
}
A.reserve(total_size,false);
// push the graph into A row by row with zero values; rows and the
// (sorted) column indices are visited in increasing order, as push_back
// requires for a compressed matrix
for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
{
std::vector<int>& indices = index_list[i];
for(unsigned int j=0; j<indices.size(); j++)
{
A.push_back(i,indices[j] , 0.00);
}
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
//**************************************************************************
//**************************************************************************
#ifdef _OPENMP
/**
 * Builds the sparsity graph of A in parallel: the active nodes are split
 * into one contiguous chunk per thread. For every free rVar dof a row
 * index list is assembled (the dof itself plus every free first
 * neighbour), sorted and deduplicated. Each dof belongs to exactly one
 * node, so the rows of index_list written by different threads are
 * disjoint and no locking is needed; per-thread entry counts are
 * accumulated in local_sizes and summed afterwards to reserve A.
 *
 * Fix: `total_size` was initialized with the double literal 0.0 although
 * it is an int — replaced with a proper int literal.
 *
 * @param A  system matrix to receive the graph (entries set to 0.0)
 */
void ParallelConstructGraph(TSystemMatrixType& A)
{
#ifndef __SUNPRO_CC
    KRATOS_TRY
#endif
    std::vector< std::vector<int> > index_list(BaseType::mEquationSystemSize);
    int number_of_threads = omp_get_max_threads();
    unsigned int pos = (mActiveNodes.begin())->GetDofPosition(rVar);

    // One contiguous range of active nodes per thread.
    DenseVector<unsigned int> partition;
    DenseVector<unsigned int> local_sizes(number_of_threads);
    for(int i=0; i<number_of_threads; i++)
        local_sizes[i] = 0;
    CreatePartition(number_of_threads, mActiveNodes.size(), partition);

    #pragma omp parallel for firstprivate(number_of_threads,pos) schedule(static,1)
    for(int k=0; k<number_of_threads; k++)
    {
        GlobalPointersVector< Node<3> >::iterator it_begin = mActiveNodes.begin()+partition[k];
        GlobalPointersVector< Node<3> >::iterator it_end = mActiveNodes.begin()+partition[k+1];
        for(GlobalPointersVector< Node<3> >::iterator in = it_begin;
                in!=it_end; in++)
        {
            const Node<3>::DofType& current_dof = in->GetDof(rVar,pos);
            if( current_dof.IsFixed() == false)
            {
                int index_i = (current_dof).EquationId();
                GlobalPointersVector< Node<3> >& neighb_nodes = in->GetValue(NEIGHBOUR_NODES);
                std::vector<int>& indices = index_list[index_i];
                indices.reserve(neighb_nodes.size()+1);

                // Diagonal entry first, then every free first neighbour.
                indices.push_back(index_i);
                for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin();
                        i != neighb_nodes.end(); i++)
                {
                    const Node<3>::DofType& neighb_dof = i->GetDof(rVar,pos);
                    if(neighb_dof.IsFixed() == false )
                    {
                        int index_j = (neighb_dof).EquationId();
                        indices.push_back(index_j);
                    }
                }

                // Sort the indices and eliminate the duplicates.
                std::sort(indices.begin(),indices.end());
                typename std::vector<int>::iterator new_end = std::unique(indices.begin(),indices.end());
                indices.erase(new_end,indices.end());
                local_sizes[k] += indices.size();
            }
        }
    }

    // Total number of stored entries across all threads.
    int total_size = 0;
    for(int i=0; i<number_of_threads; i++)
        total_size += local_sizes[i];
    A.reserve(total_size,false);

    // Push the graph entries row by row with zero values (rows and sorted
    // columns visited in increasing order, as push_back requires).
    for(unsigned int i=0; i<BaseType::mEquationSystemSize; i++)
    {
        std::vector<int>& indices = index_list[i];
        for(unsigned int j=0; j<indices.size(); j++)
        {
            A.push_back(i,indices[j] , 0.00);
        }
    }
#ifndef __SUNPRO_CC
    KRATOS_CATCH("")
#endif
}
#endif
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
TVariableType const & rVar;
GlobalPointersVector<Node<3> > mActiveNodes;
/*@} */
/**@name Private Operators*/
/*@{ */
//******************************************************************************************
//******************************************************************************************
/// Splits [0, number_of_rows) into number_of_threads contiguous chunks of
/// (integer-division) equal size; partitions[k] is the first row of chunk
/// k and partitions[number_of_threads] == number_of_rows, so the last
/// chunk absorbs any remainder.
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const int chunk = number_of_rows / number_of_threads;
    // Equivalent to the cumulative form partitions[i] = partitions[i-1] + chunk.
    for (unsigned int i = 0; i < number_of_threads; ++i)
        partitions[i] = i * chunk;
    partitions[number_of_threads] = number_of_rows;
}
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ResidualBasedEliminationBuilderAndSolverComponentwise */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVERCOMPONENTWISE defined */
|
old_Clique.h | // This code is part of the project "Theoretically Efficient Parallel Graph
// Algorithms Can Be Fast and Scalable", presented at Symposium on Parallelism
// in Algorithms and Architectures, 2018.
// Copyright (c) 2018 Laxman Dhulipala, Guy Blelloch, and Julian Shun
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#pragma once
#include <math.h>
#include "ligra/bucket.h"
#include "ligra/edge_map_reduce.h"
#include "ligra/ligra.h"
#include "ligra/pbbslib/dyn_arr.h"
#include "pbbslib/list_allocator.h"
#include "pbbslib/integer_sort.h"
#include "intersect.h"
//#include "radix_wrapper.h"
#include "benchmarks/DegeneracyOrder/BarenboimElkin08/DegeneracyOrder.h"
#include "benchmarks/DegeneracyOrder/GoodrichPszona11/DegeneracyOrder.h"
#include "benchmarks/KCore/JulienneDBS17/KCore.h"
#define SIMD_STATE 4
/*struct lw_sym_graph {
size_t n;
size_t m;
sequence<uintT> offsets;
sequence<uintE> edges;
lw_sym_graph(size_t _n, size_t _m, sequence<uintT> _offsets, sequence<uintE> _edges) : n(_n), m(_m), offsets(_offsets), edges(_edges) {}
uintE getOutDegree(uintT i) {
return offsets[i+1]-offsets[i];
}
uintE* getOutNeighbors(uintT i) {
return edges.begin() + offsets[i];
}
};*/
// relabel_graph is only implemented for uncompressed symmetric graphs.
// The three overloads below are guard stubs for the unsupported
// representations: they abort at runtime (assert(false)) and return the
// input graph unchanged so the template signature still resolves.

// Stub: compressed symmetric graphs are not supported.
template <template <class W> class vertex, class W, typename P,
typename std::enable_if<
std::is_same<vertex<W>, csv_bytepd_amortized<W>>::value,
int>::type = 0>
inline auto relabel_graph(symmetric_graph<vertex, W>& G, uintE* rank, P& pred) -> decltype(G) {
assert(false); return G;
}
// Stub: directed (asymmetric) graphs are not supported.
template <
template <class W> class vertex, class W, typename P,
typename std::enable_if<std::is_same<vertex<W>, asymmetric_vertex<W>>::value,
int>::type = 0>
inline auto relabel_graph(asymmetric_graph<vertex, W>& G, uintE* rank, P& pred) -> decltype(G) {
std::cout << "Filter graph not implemented for directed graphs" << std::endl;
assert(false); // Not implemented for directed graphs
return G;
}
// Stub: compressed directed graphs are not supported.
template <
template <class W> class vertex, class W, typename P,
typename std::enable_if<
std::is_same<vertex<W>, cav_bytepd_amortized<W>>::value, int>::type = 0>
inline auto relabel_graph(asymmetric_graph<vertex, W>& G,uintE* rank, P& pred) -> decltype(G) {
std::cout << "Filter graph not implemented for directed graphs" << std::endl;
assert(false); // Not implemented for directed graphs
return G;
}
// Filters GA by `pred` and relabels the surviving vertices by `rank`:
// vertex i of the filtered graph becomes vertex rank[i] of the result,
// with every adjacency list rewritten through rank[] and re-sorted by the
// new ids. Only implemented for uncompressed symmetric graphs.
// NOTE(review): rank is assumed to be a permutation of [0, G.n) — TODO
// confirm against callers (it is produced by the degeneracy orderings).
template <template <class W> class vertex, class W, typename P,
typename std::enable_if<std::is_same<vertex<W>, symmetric_vertex<W>>::value,
int>::type = 0>
inline symmetric_graph<symmetric_vertex, W> relabel_graph(symmetric_graph<vertex, W>& GA, uintE* rank, P& pred) {
using w_vertex = vertex<W>;
auto G = filter_graph(GA, pred);
size_t n = G.n;
// Degree of old vertex i is stored at the slot of its new id rank[i];
// a prefix sum then yields the new offsets.
auto outOffsets = sequence<uintT>(n + 1);
parallel_for(0, n, [&] (size_t i) {
w_vertex u = G.get_vertex(i);
outOffsets[rank[i]] = u.getOutDegree();
}, 1);
outOffsets[n] = 0;
uintT outEdgeCount = pbbslib::scan_add_inplace(outOffsets);
using edge = std::tuple<uintE, W>;
auto out_edges = sequence<edge>(outEdgeCount);
// Copy each adjacency list to its new location, relabel the targets and
// re-sort the list so neighbours are ordered by the new ids.
parallel_for(0, n, [&] (size_t i) {
w_vertex u = G.get_vertex(i);
size_t out_offset = outOffsets[rank[i]];
uintE d = u.getOutDegree();
edge* nghs = u.getOutNeighbors();
edge* dir_nghs = out_edges.begin() + out_offset;
for (size_t j=0; j < d; j++) {
dir_nghs[j] = std::make_tuple(rank[std::get<0>(nghs[j])], std::get<1>(nghs[j]));
}
pbbslib::sample_sort (dir_nghs, d, [&](const edge u, const edge v) {
return std::get<0>(u) < std::get<0>(v);
}, true);
}, 1);
// Assemble per-vertex metadata (offset/degree) for the new graph.
auto out_vdata = pbbs::new_array_no_init<vertex_data>(n);
parallel_for(0, n, [&] (size_t i) {
out_vdata[i].offset = outOffsets[i];
out_vdata[i].degree = outOffsets[i+1]-outOffsets[i];
});
outOffsets.clear();
auto out_edge_arr = out_edges.to_array();
G.del();
return symmetric_graph<symmetric_vertex, W>(
out_vdata, n, outEdgeCount,
get_deletion_fn(out_vdata, out_edge_arr),
out_edge_arr);
}
// Ranks the n vertices of G by non-decreasing out-degree.
// Returns a caller-owned array r where r[v] is v's position in that
// order (ties broken by the sort's internal order).
template <class Graph>
inline uintE* rankNodes(Graph& G, size_t n) {
uintE* r = pbbslib::new_array_no_init<uintE>(n);
sequence<uintE> o(n);
timer t;
t.start();
// o starts as the identity permutation, then is sorted by degree.
par_for(0, n, pbbslib::kSequentialForThreshold, [&] (size_t i) { o[i] = i; });
pbbslib::sample_sort_inplace(o.slice(), [&](const uintE u, const uintE v) {
return G.get_vertex(u).getOutDegree() < G.get_vertex(v).getOutDegree();
});
// Invert the permutation: the vertex at sorted position i has rank i.
par_for(0, n, pbbslib::kSequentialForThreshold, [&] (size_t i)
{ r[o[i]] = i; });
t.stop();
debug(t.reportTotal("Rank time"););
return r;
}
// Returns the maximum out-degree over all vertices of DG (sequential scan).
template<class Graph>
size_t get_max_deg(Graph& DG) {
  size_t best = 0;
  size_t v = 0;
  while (v < DG.n) {
    const size_t deg = DG.get_vertex(v).getOutDegree();
    best = (deg > best) ? deg : best;
    ++v;
  }
  return best;
}
/*if (!count_only) base[k_idx] = induced_space.induced[i];
uintE vtx = induced_space.induced[i];
//assert (vtx < DG.n);
auto vtx_ptr = (uintE*)(DG.get_vertex(vtx).getOutNeighbors());
auto vtx_size = DG.get_vertex(vtx).getOutDegree();
size_t min_size = std::min((size_t) induced_space.num_induced, (size_t) vtx_size);
if (min_size < k - k_idx) {new_induced_space.running_sum = induced_space.running_sum; return 0;}
bool out_ptr_flag = false;
if (!new_induced_space.induced && (to_save || intersect_op_type.count_space_flag)) {
out_ptr_flag = true;
new_induced_space.alloc_induced(min_size, induced_space);
}
auto out_ptr = new_induced_space.induced;
new_induced_space.num_induced = min_size;
size_t out_size = intersect_op_type(vtx_ptr, vtx_size, induced_space.induced, induced_space.num_induced, to_save, out_ptr);
if (out_ptr_flag && (!to_save || out_size == 0)) {
new_induced_space.del();
}
new_induced_space.num_induced = out_size;
new_induced_space.running_sum = induced_space.running_sum + new_induced_space.num_induced;
return out_size;
*/
// Recursive clique counting over a flat scratch buffer.
// induced->induced is laid out as k rows of num_induced[0] entries each;
// row (k_idx-1) holds the candidate vertex set at the previous level.
// For each candidate, the row for level k_idx is filled with the
// intersection of the candidate set and the vertex's out-neighbours; at
// the last level only the intersection sizes are summed.
// Returns the number of k-cliques extending the current partial clique.
template <class Graph>
inline size_t KCliqueDir_fast_rec(Graph& DG, size_t k_idx, size_t k, InducedSpace_lw* induced) {
size_t num_induced = induced->num_induced[k_idx-1];
uintE* prev_induced = induced->induced + induced->num_induced[0] * (k_idx - 1);
if (num_induced == 0) return 0;
// Last level: count intersections without materializing them.
if (k_idx + 1 == k) {
size_t counts = 0;
for (size_t i=0; i < num_induced; i++) {
uintE vtx = prev_induced[i];
counts += lstintersect_set(prev_induced, num_induced, (uintE*)(DG.get_vertex(vtx).getOutNeighbors()), DG.get_vertex(vtx).getOutDegree(), false, nullptr);
}
return counts;
}
size_t total_ct = 0;
for (size_t i=0; i < num_induced; ++i) {
uintE vtx = prev_induced[i];
induced->num_induced[k_idx] = lstintersect_set(prev_induced, num_induced, (uintE*)(DG.get_vertex(vtx).getOutNeighbors()), DG.get_vertex(vtx).getOutDegree(), true, induced->induced + induced->num_induced[0] * k_idx);
if (induced->num_induced[k_idx] > 0) total_ct += KCliqueDir_fast_rec(DG, k_idx + 1, k, induced);
}
return total_ct;
}
// Counts k-cliques in the directed (oriented) graph DG.
// Parallelized with OpenMP: each thread allocates one private
// InducedSpace_lw scratch buffer (sized by the maximum out-degree) and
// the per-vertex counts are combined via reduction(+:n).
template <class Graph>
inline size_t KCliqueDir_fast(Graph& DG, size_t k) {
size_t n = 0;
size_t max_deg = get_max_deg(DG);
InducedSpace_lw* induced = nullptr;
#pragma omp parallel private(induced) reduction(+:n)
{
// One scratch buffer per thread, reused across all its vertices.
induced = new InducedSpace_lw(k, max_deg);//(uintE*) malloc(k*max_deg*sizeof(uintE));
#pragma omp for schedule(dynamic, 1) nowait
for (size_t i=0; i < DG.n; ++i) {
if (DG.get_vertex(i).getOutDegree() != 0) {
// Level 0 of the scratch buffer = out-neighbourhood of vertex i.
induced->num_induced[0] = (uintE) DG.get_vertex(i).getOutDegree();
//parallel_for (0, induced->num_induced[0], [&] (size_t j) {
// induced->induced[j] = ((uintE*)(DG.get_vertex(i).getOutNeighbors()))[j];
//});
for (size_t j=0; j < induced->num_induced[0]; j++) {
induced->induced[j] = ((uintE*)(DG.get_vertex(i).getOutNeighbors()))[j];
}
n += KCliqueDir_fast_rec(DG, 1, k, induced);
}
}
if (induced != nullptr) { induced->del(); delete induced; }
}
return n;
}
// Recursive clique counting over a FullSpace_orig_lw scratch area that
// additionally keeps, per level, an induced adjacency structure:
//  - induced->induced        : k rows of nn vertex indices (candidate sets)
//  - induced->induced_degs   : per-level degrees within the candidate set
//  - induced->induced_edges  : per-vertex adjacency rows of width nn
//  - induced->labels         : current level membership marks per vertex
//  - induced->num_edges      : per-level edge counts of the induced graph
// At the next-to-last level the answer is simply the number of edges of
// the current induced subgraph.
template <class Graph>
inline size_t KCliqueDir_fast_orig_rec(Graph& DG, size_t k_idx, size_t k, FullSpace_orig_lw* induced) {
size_t num_induced = induced->num_induced[k_idx-1];
uintE* prev_induced = induced->induced + induced->nn * (k_idx - 1);
uintE* prev_induced_degs = induced->induced_degs + induced->nn * (k_idx - 1);
if (num_induced == 0) return 0;
if (k_idx + 1 == k) return induced->num_edges[k_idx - 1];
size_t total_ct = 0;
for (size_t i=0; i < num_induced; ++i) {
uintE idx = prev_induced[i];
uintE* new_induced = induced->induced + induced->nn * k_idx;
// The new candidate set is idx's (already filtered) adjacency row.
induced->num_induced[k_idx] = prev_induced_degs[idx];
uintE new_num_induced = induced->num_induced[k_idx];
//parallel_for(0, new_num_induced, [&] (size_t j) {new_induced[j] = induced->induced_edges[idx * induced->nn + j]; });
for (size_t j=0; j < new_num_induced; j++) { new_induced[j] = induced->induced_edges[idx * induced->nn + j]; }
// Mark members of the new candidate set with the current level id.
//parallel_for(0, new_num_induced, [&] (size_t j){ induced->labels[new_induced[j]] = k_idx; });
for (size_t j=0; j < new_num_induced; j++) { induced->labels[new_induced[j]] = k_idx; }
uintE* new_induced_degs = induced->induced_degs + induced->nn * k_idx;
//parallel_for(0, induced->nn, [&] (size_t j) { new_induced_degs[j] = 0; });
for (size_t j=0; j < induced->nn; j++) { new_induced_degs[j] = 0; }
// Recompute degrees restricted to the new set; neighbours outside the
// set are swapped to the tail of the adjacency row (and `end`
// shrinks), so the first new_induced_degs[v] entries of each row are
// exactly the surviving neighbours at this level.
for (size_t j=0; j < new_num_induced; j++) {
uintE v_idx = new_induced[j];
uintE v_deg = prev_induced_degs[v_idx];
uintE* v_edges = induced->induced_edges + v_idx * induced->nn;
size_t end = v_deg;
for (size_t l=0; l < end; l++) {
if (induced->labels[v_edges[l]] == k_idx) new_induced_degs[v_idx]++;
else { // if (to_save)
auto tmp = v_edges[l];
v_edges[l--] = v_edges[--end];
v_edges[end] = tmp;
}
}
}
/*parallel_for(0, new_num_induced, [&] (size_t j) {
uintE v_idx = new_induced[j];
uintE v_deg = prev_induced_degs[v_idx];
uintE* v_edges = induced->induced_edges + v_idx * induced->nn;
size_t end = v_deg;
for (size_t l=0; l < end; l++) {
if (induced->labels[v_edges[l]] == k_idx) new_induced_degs[v_idx]++;
else { // if (to_save)
auto tmp = v_edges[l];
v_edges[l--] = v_edges[--end];
v_edges[end] = tmp;
}
}
});*/
// Edge count of the new induced subgraph = sum of restricted degrees
// (each undirected edge contributes per the stored orientation).
auto deg_seq = pbbslib::make_sequence(new_induced_degs, induced->nn);
induced->num_edges[k_idx] = pbbslib::reduce_add(deg_seq);
//uintE vtx = prev_induced[i];
//induced->num_induced[k_idx] = lstintersect_set(prev_induced, num_induced, (uintE*)(DG.get_vertex(vtx).getOutNeighbors()), DG.get_vertex(vtx).getOutDegree(), true, induced->induced + induced->num_induced[0] * k_idx);
if (induced->num_induced[k_idx] > 0) total_ct += KCliqueDir_fast_orig_rec(DG, k_idx + 1, k, induced);
// Restore the labels so the caller's level marks remain valid.
//parallel_for(0, new_num_induced, [&] (size_t j){ induced->labels[new_induced[j]] = k_idx-1; });
for (size_t j=0; j < new_num_induced; j++) { induced->labels[new_induced[j]] = k_idx-1; }
}
return total_ct;
}
// Counts k-cliques using the FullSpace_orig_lw (induced adjacency)
// recursion. OpenMP-parallel over root vertices with one private scratch
// space per thread and a reduction(+:n) over the per-vertex counts.
template <class Graph>
inline size_t KCliqueDir_fast_orig(Graph& DG, size_t k) {
size_t n = 0;
size_t max_deg = get_max_deg(DG);
FullSpace_orig_lw* induced = nullptr;
#pragma omp parallel private(induced) reduction(+:n)
{
// One scratch space per thread, reused across all its root vertices.
induced = new FullSpace_orig_lw(max_deg, k);
#pragma omp for schedule(dynamic, 1) nowait
for (size_t i=0; i < DG.n; ++i) {
if (DG.get_vertex(i).getOutDegree() != 0) {
// setup() initializes level 0 with vertex i's out-neighbourhood.
induced->setup(DG, k, i);
n += KCliqueDir_fast_orig_rec(DG, 1, k, induced);
}
}
if (induced != nullptr) { induced->del(); delete induced; }
}
return n;
}
// induced_space must have: num_induced, .del()
// Generic clique-counting recursion parameterized by the induced-space
// type IN, the intersection operation and (when listing) a base_op
// callback invoked with each completed clique.
// NOTE(review): `base` accumulates the partial clique only when
// count_only == false — confirm against the intersect_op implementations.
template <class IN, class Graph, class I, class F, class G, class H>
inline size_t KCliqueDir_rec(Graph& DG, size_t k_idx, size_t k, I& induced_space,
F intersect_op, G intersect_op_type, sequence<uintE>& base, H base_op, bool count_only = true) {
size_t num_induced = induced_space.num_induced;
if (num_induced == 0) return 0; //return induced_space.running_sum;
// Recursion bottom: every remaining candidate completes a clique.
if (k_idx == k) {
base_op(base);
return num_induced;
}
// optimization if counting and not listing: at the next-to-last level
// only the intersection sizes are needed, not the sets themselves
if (k_idx + 1 == k && count_only) {
//return num_induced;
if (induced_space.full_flag) return induced_space.num_edges;
//sequence<size_t> counts = sequence<size_t>::no_init(num_induced);
size_t counts = 0;
//parallel_for (0, num_induced, [&] (size_t i) {
for (size_t i=0; i < num_induced; i++) {
auto new_induced_space = IN();
counts += intersect_op(DG, k_idx, k, i, induced_space, intersect_op_type, base, count_only, false, new_induced_space);
}//);
return counts; //pbbslib::reduce_add(counts);
}
// General case: materialize each sub-space and recurse into it.
size_t total_ct = 0;
for (size_t i=0; i < num_induced; ++i) {
auto new_induced_space = IN();
size_t new_num_induced = intersect_op(DG, k_idx, k, i, induced_space, intersect_op_type, base, count_only, true, new_induced_space);
if (new_num_induced > 0) { // >= k - k_idx
total_ct += KCliqueDir_rec<IN>(DG, k_idx + 1, k, new_induced_space, intersect_op, intersect_op_type, base, base_op, count_only);
new_induced_space.del();
}
}
return total_ct; // num_induced +
}
// Driver for the generic clique recursion: iterates over root vertices in
// parallel (OpenMP, dynamic schedule, reduction(+:n)), sets up one
// root-level induced space I per thread and runs KCliqueDir_rec for the
// remaining k levels. IN::init()/IN::finish() bracket any global
// allocator state the sub-space type needs.
template <class IN, class I, class Graph, class F, class G, class H>
inline size_t KCliqueDir(Graph& DG, size_t k, F intersect_op, G intersect_op_type, H base_op, bool count_only = true) {
IN::init();
//auto tots = sequence<size_t>::no_init(DG.n);
//static I* induced_space_pt = nullptr;
size_t n = 0;
size_t max_deg = get_max_deg(DG);
//uintE* induced = nullptr;
I* induced = nullptr;
//#pragma omp threadprivate(induced)
#pragma omp parallel private(induced) reduction(+:n)
{
// One root-level induced space per thread, reused across vertices.
induced = new I(max_deg, k); //(uintE*) malloc(k*max_deg*sizeof(uintE)); //max_deg
#pragma omp for schedule(dynamic, 1) nowait
for (size_t i=0; i < DG.n; ++i) {
//parallel_for (0, DG.n,[&] (size_t i) {
//if (DG.get_vertex(i).getOutDegree() == 0) tots[i] = 0;
//else {
if (DG.get_vertex(i).getOutDegree() != 0) {
sequence<uintE> base = sequence<uintE>();
//if (!count_only) {
// base = sequence<uintE>::no_init(k);
// base[0] = i;
//}
//I induced_space = I(DG, k, i);
//I induced_space = I(induced);
induced->setup(DG, k, i);
//if (induced_space.num_induced == 0) tots[i] = 0;
//else
n += KCliqueDir_rec<IN>(DG, 1, k, *induced, intersect_op, intersect_op_type, base, base_op, count_only);
//induced_space.del();
}
}//);
// {free(induced); induced = nullptr; }
if (induced != nullptr) { induced->del(); delete induced; }
}
IN::finish();
return n; //pbbslib::reduce_add(tots);
}
template <class IN, class I, class Graph, class F, class G, class H>
size_t KCliqueDirGen(Graph& DG, size_t k, F intersect_op, G intersect_op_type, H base_op, bool count_only);
template <class Graph>
void assert_induced_stack_thr(Graph& DG, size_t k = 1) {
size_t max_deg = get_max_deg(DG);
assert (max_deg*k <= INDUCED_STACK_THR);
}
// Dispatches KCliqueDir with the induced-subgraph space selected by
// subspace_type:
//   0 - dynamic, 1 - allocator-backed, 2 - stack, 3 - recursive dynamic,
//   4 - recursive stack (1-4 require the neighbourhoods to fit the stack
//   threshold, checked via assert_induced_stack_thr).
// BUG FIX: previously an unrecognized subspace_type fell off the end of
// this value-returning function (undefined behavior); it now aborts with
// a diagnostic and returns 0 in release builds.
template <class Graph, class F>
size_t assemble_induced_KCliqueDir(Graph& DG, size_t k, F inter_use, long subspace_type, bool count_only) {
  auto nop_f = [] (sequence<uintE> b) {return;};
  auto lstintersect = lstintersect_induced_struct2{};
  if (subspace_type == 0) return KCliqueDir<InducedSpace_dyn, InducedSpace_dyn>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  else if (subspace_type == 1) {
    assert_induced_stack_thr(DG);
    return KCliqueDir<InducedSpace_alloc, InducedSpace_dyn>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  }
  else if (subspace_type == 2) {
    assert_induced_stack_thr(DG);
    return KCliqueDir<InducedSpace_stack, InducedSpace_dyn>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  }
  else if (subspace_type == 3) {
    assert_induced_stack_thr(DG, k);
    return KCliqueDir<InducedSpace_rec, InducedSpace_dyn_setup>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  }
  else if (subspace_type == 4) {
    assert_induced_stack_thr(DG, k);
    return KCliqueDir<InducedSpace_rec, InducedSpace_stack_setup>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  }
  std::cout << "Unknown subspace type: " << subspace_type << std::endl;
  assert(false);
  return 0;
}
// Dispatches KCliqueDirGen with the induced-subgraph space selected by
// subspace_type (0-2 only; the recursive space types 3/4 are rejected).
// BUG FIXES:
//  - the subspace_type == 0 branch called KCliqueDir instead of
//    KCliqueDirGen (copy-paste from assemble_induced_KCliqueDir; every
//    other branch of this function dispatches to KCliqueDirGen);
//  - an unrecognized subspace_type previously fell off the end of a
//    value-returning function (undefined behavior); it now aborts with a
//    diagnostic and returns 0 in release builds.
template <class Graph, class F>
size_t assemble_induced_KCliqueDirGen(Graph& DG, size_t k, F inter_use, long subspace_type, bool count_only) {
  auto nop_f = [] (sequence<uintE> b) {return;};
  auto lstintersect = lstintersect_induced_struct2{};
  assert (subspace_type != 3 && subspace_type != 4);
  // cannot use rec types
  if (subspace_type == 0) return KCliqueDirGen<InducedSpace_dyn, InducedSpace_dyn>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  else if (subspace_type == 1) {
    assert_induced_stack_thr(DG);
    return KCliqueDirGen<InducedSpace_alloc, InducedSpace_dyn>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  }
  else if (subspace_type == 2) {
    assert_induced_stack_thr(DG);
    return KCliqueDirGen<InducedSpace_stack, InducedSpace_dyn>(DG, k-1, lstintersect, inter_use, nop_f, count_only);
  }
  std::cout << "Unknown subspace type: " << subspace_type << std::endl;
  assert(false);
  return 0;
}
// induced
// generated
// -i 0 (simple gbbs intersect), -i 2 (simd intersect), -i 1 (graph set inter)
// -o 0 (goodrich), 1 (barnboimelkin approx), 2 (barenboimelkin exact)
// todo approx work and do some kind of break in gen if too much
// TODO get rid of duplicates in edge lists????
// Count the k-cliques of GA.
// order_type selects the vertex ranking used to orient edges:
//   0: Goodrich-Pszona degeneracy order, 1: Barenboim-Elkin (approx, eps),
//   2: k-core rank, 3: rankNodes; any other value now falls back to the
//   identity order (see fix below).
// gen_type/space_type/subspace_type/inter_type choose the counting kernel
// and intersection routine. Prints timing breakdowns and the final count as
// side effects; returns the number of k-cliques.
template <class Graph>
inline size_t KClique(Graph& GA, size_t k, long order_type = 0, double epsilon = 0.1,
  bool gen_type = true, long space_type = 0, long subspace_type = 0, long inter_type = 0) {
  using W = typename Graph::weight_type;
  assert (k >= 1);
  // Trivial sizes: every vertex is a 1-clique, every edge a 2-clique.
  if (k == 1) return GA.n;
  else if (k == 2) return GA.m;

  // Phase 1: rank vertices; lower rank comes earlier in the orientation.
  sequence<uintE> rank;
  timer t_rank; t_rank.start();
  if (order_type == 0) rank = goodrichpszona_degen::DegeneracyOrder_intsort(GA, epsilon);
  else if (order_type == 1) rank = barenboimelkin_degen::DegeneracyOrder(GA, epsilon);
  else if (order_type == 2) {
    rank = sequence<uintE>(GA.n, [&](size_t i) { return i; });
    auto kcore = KCore(GA);
    auto get_core = [&](uintE& p) -> uintE { return kcore[p]; };
    integer_sort_inplace(rank.slice(), get_core);
  }
  else if (order_type == 3) rank = pbbslib::make_sequence(rankNodes(GA, GA.n), GA.n);
  // Fix: an out-of-range order_type previously left `rank` empty, so the
  // pack_predicate below indexed out of bounds (UB). Fall back to the
  // identity ordering so behavior is defined for any order_type.
  else rank = sequence<uintE>(GA.n, [&](size_t i) { return i; });
  double tt_rank = t_rank.stop();
  std::cout << "### Rank Running Time: " << tt_rank << std::endl;

  // Phase 2: keep only edges oriented from lower to higher rank, drop
  // endpoints whose out-degree cannot complete a k-clique, relabel by rank.
  timer t_filter; t_filter.start();
  auto pack_predicate = [&](const uintE& u, const uintE& v, const W& wgh) {
    return (rank[u] < rank[v]) && GA.get_vertex(u).getOutDegree() >= k-1 && GA.get_vertex(v).getOutDegree() >= k-1;
  };
  auto DG = relabel_graph(GA, rank.begin(), pack_predicate); //filter_graph(GA, pack_predicate);
  double tt_filter = t_filter.stop();
  std::cout << "### Filter Graph Running Time: " << tt_filter << std::endl;

  // Phase 3: dispatch to a counting kernel. The directed search below picks
  // k-1 additional vertices past each root, hence the k-1 arguments.
  timer t; t.start();
  size_t count = 0;
  bool count_only = true;
  if (!gen_type && space_type == 0) {
    if (inter_type == 0){
      count = assemble_induced_KCliqueDir(DG, k, lstintersect_par_struct{}, subspace_type, count_only);
    }
    else if (inter_type == 1) {
      // The set-based intersection represents vertex ids as ints.
      assert (DG.n < INT_MAX);
      count = assemble_induced_KCliqueDir(DG, k, lstintersect_set_struct{}, subspace_type, count_only);
    }
    else if (inter_type == 2){
      count = assemble_induced_KCliqueDir(DG, k, lstintersect_vec_struct{}, subspace_type, count_only);
    }
    else {
      count = assemble_induced_KCliqueDir(DG, k, lstintersect_simple_struct{}, subspace_type, count_only);
    }
  }
  else if (!gen_type && space_type == 1) {
    auto nop_f = [] (sequence<uintE> b) {return;};
    auto lstintersect = lstintersect_orig_struct{};
    count = KCliqueDir<FullSpace_orig, FullSpace_orig>(DG, k-1, lstintersect, lstintersect_simple_struct{}, nop_f, count_only);
  }
  else if (!gen_type && space_type == 2) {
    count = KCliqueDir_fast(DG, k-1);
  }
  else if (!gen_type && space_type == 3) {
    count = KCliqueDir_fast_orig(DG, k-1);
  }
  // NOTE(review): every gen_type == true path is currently disabled
  // (previously large commented-out blocks lived here); such configurations
  // fall through and report a count of 0.
  double tt = t.stop();
  std::cout << "### Count Running Time: " << tt << std::endl;
  std::cout << "### Num " << k << " cliques = " << count << "\n";
  return count;
}
// Generated (loop-unrolled) k-clique search over induced subspaces.
// IN: per-level subspace type, constructed fresh at each level; I: root
// induced space built from vertex a. intersect_op(DG, k_idx, k, idx,
// parent_space, op_type, base, count_only, to_save, child_space) intersects
// candidate idx against the parent space, materializing child_space when
// to_save is true, and returns the intersection size.
// k is the number of vertices still to be chosen beyond the root (callers
// pass cliquesize - 1). base_op is invoked per completed clique when
// count_only is false. Returns the total clique count.
template <class IN, class I, class Graph, class F, class G, class H>
inline size_t KCliqueDirGen(Graph& DG, size_t k, F intersect_op, G intersect_op_type, H base_op, bool count_only) {
  IN::init();
  sequence<uintE> base = sequence<uintE>();
  // Fix: a completed clique holds k+1 vertices (root + k chosen), and the
  // listing paths below write base[k] (e.g. base[2] when k == 2), so the
  // buffer needs k+1 slots; no_init(k) was an off-by-one buffer overflow.
  if (!count_only) base = sequence<uintE>::no_init(k+1);
  // NOTE(review): induceda below is never del()'d at any level -- confirm
  // I's destructor releases its storage.
  switch (k) {
    case 2: {
      auto storea = sequence<size_t>::no_init(DG.n);
      parallel_for (0, DG.n, [&] (size_t a) {
        auto induceda = I(DG, k, a);
        auto sizea = induceda.num_induced;
        if (sizea >= k) {
          auto storeb = sequence<size_t>::no_init(sizea);
          parallel_for (0, sizea, [&] (size_t b) {
            size_t sizeb = 0;
            auto inducedb = IN();
            if (count_only) {
              sizeb = intersect_op(DG, 1, k, b, induceda, intersect_op_type, base, count_only, false, inducedb);
            } else {
              sizeb = intersect_op(DG, 1, k, b, induceda, intersect_op_type, base, count_only, true, inducedb);
              if (sizeb >= k - 1) {
                for (size_t xx = 0; xx < sizeb; xx++) {
                  base[2] = inducedb.induced[xx];
                  base_op(base);
                }
              }
              inducedb.del();
            }
            storeb[b] = sizeb;
          });
          storea[a] = pbbslib::reduce_add(storeb);
        } else storea[a] = 0;
      });
      IN::finish();
      return pbbslib::reduce_add(storea);
      break; }
    case 3: {
      auto storea = sequence<size_t>::no_init(DG.n);
      parallel_for (0, DG.n, [&] (size_t a) {
        auto induceda = I(DG, k, a);
        auto sizea = induceda.num_induced;
        if (sizea >= k) {
          auto storeb = sequence<size_t>::no_init(sizea);
          parallel_for (0, sizea, [&] (size_t b) {
            size_t sizeb = 0;
            auto inducedb = IN();
            sizeb = intersect_op(DG, 1, k, b, induceda, intersect_op_type, base, count_only, true, inducedb);
            if (sizeb >= k - 1) {
              auto storec = sequence<size_t>::no_init(sizeb);
              parallel_for (0, sizeb, [&] (size_t c) {
                size_t sizec = 0;
                auto inducedc = IN();
                if (count_only) {
                  sizec = intersect_op(DG, 2, k, c, inducedb, intersect_op_type, base, count_only, false, inducedc);
                } else {
                  sizec = intersect_op(DG, 2, k, c, inducedb, intersect_op_type, base, count_only, true, inducedc);
                  if (sizec >= k - 2) {
                    for (size_t xx = 0; xx < sizec; xx++) {
                      base[3] = inducedc.induced[xx];
                      base_op(base);
                    }
                  }
                  inducedc.del();
                }
                storec[c] = sizec;
              });
              storeb[b] = pbbslib::reduce_add(storec);
            } else storeb[b] = 0;
            inducedb.del();
          });
          storea[a] = pbbslib::reduce_add(storeb);
        } else storea[a] = 0;
      });
      IN::finish();
      return pbbslib::reduce_add(storea);
      break; }
    case 4: {
      auto storea = sequence<size_t>::no_init(DG.n);
      parallel_for (0, DG.n, [&] (size_t a) {
        auto induceda = I(DG, k, a);
        auto sizea = induceda.num_induced;
        if (sizea >= k) {
          auto storeb = sequence<size_t>::no_init(sizea);
          parallel_for (0, sizea, [&] (size_t b) {
            size_t sizeb = 0;
            auto inducedb = IN();
            sizeb = intersect_op(DG, 1, k, b, induceda, intersect_op_type, base, count_only, true, inducedb);
            if (sizeb >= k - 1) {
              auto storec = sequence<size_t>::no_init(sizeb);
              parallel_for (0, sizeb, [&] (size_t c) {
                size_t sizec = 0;
                auto inducedc = IN();
                sizec = intersect_op(DG, 2, k, c, inducedb, intersect_op_type, base, count_only, true, inducedc);
                if (sizec >= k - 2) {
                  auto stored = sequence<size_t>::no_init(sizec);
                  parallel_for (0, sizec, [&] (size_t d) {
                    size_t sized = 0;
                    auto inducedd = IN();
                    if (count_only) {
                      sized = intersect_op(DG, 3, k, d, inducedc, intersect_op_type, base, count_only, false, inducedd);
                    } else {
                      sized = intersect_op(DG, 3, k, d, inducedc, intersect_op_type, base, count_only, true, inducedd);
                      if (sized >= k - 3) {
                        for (size_t xx = 0; xx < sized; xx++) {
                          base[4] = inducedd.induced[xx];
                          base_op(base);
                        }
                      }
                      inducedd.del();
                    }
                    stored[d] = sized;
                  });
                  storec[c] = pbbslib::reduce_add(stored);
                } else storec[c] = 0;
                inducedc.del();
              });
              storeb[b] = pbbslib::reduce_add(storec);
            } else storeb[b] = 0;
            inducedb.del();
          });
          storea[a] = pbbslib::reduce_add(storeb);
        } else storea[a] = 0;
      });
      IN::finish();
      return pbbslib::reduce_add(storea);
      break; }
    default:
      // For k > 4, unroll four levels and finish with the generic recursion.
      auto storea = sequence<size_t>::no_init(DG.n);
      parallel_for (0, DG.n, [&] (size_t a) {
        auto induceda = I(DG, k, a);
        auto sizea = induceda.num_induced;
        if (sizea >= k) {
          auto storeb = sequence<size_t>::no_init(sizea);
          parallel_for (0, sizea, [&] (size_t b) {
            auto inducedb = IN();
            size_t sizeb = intersect_op(DG, 1, k, b, induceda, intersect_op_type, base, count_only, true, inducedb);
            if (sizeb >= k - 1) {
              auto storec = sequence<size_t>::no_init(sizeb);
              parallel_for (0, sizeb, [&] (size_t c) {
                auto inducedc = IN();
                size_t sizec = intersect_op(DG, 2, k, c, inducedb, intersect_op_type, base, count_only, true, inducedc);
                if (sizec >= k - 2) {
                  auto stored = sequence<size_t>::no_init(sizec);
                  parallel_for (0, sizec, [&] (size_t d) {
                    auto inducedd = IN();
                    size_t sized = intersect_op(DG, 3, k, d, inducedc, intersect_op_type, base, count_only, true, inducedd);
                    if (sized >= k - 3) {
                      stored[d] = KCliqueDir_rec<IN>(DG, 4, k, inducedd, intersect_op, intersect_op_type, base, base_op, count_only);
                    } else stored[d] = 0;
                  });
                  storec[c] = pbbslib::reduce_add(stored);
                } else storec[c] = 0;
              });
              storeb[b] = pbbslib::reduce_add(storec);
            } else storeb[b] = 0;
          });
          storea[a] = pbbslib::reduce_add(storeb);
        } else storea[a] = 0;
      });
      IN::finish();
      return pbbslib::reduce_add(storea);
  }
}
/*
// keep track of induced subgraph as you go up -- store edge lists
// this would be P alpha k space; P k if we didn't have induced subgraph, but longer to do k way intersect instead of 2 way intersect (k factor in work)
// TODO Pnk space without ordering; induced subgraphs have to be stored in hash tables
// can preinitialize k arrays of size n for each processor, and reuse when you do mem allocations -- check
// which processor is doing allocation and get the space assoc w/that processor
template <class Graph>
inline size_t KCliqueDir_rec(Graph& DG, size_t k_idx, size_t k, sequence<uintE> base) {
// intersect outneighbors of verts in base
auto lst_intersect = kintersect(DG, base, k_idx); // TODO hash table?, vectors, induced subgraph
size_t num_intersect = lst_intersect.size();
if (k_idx == k) {
return num_intersect;
}
//auto counts = sequence<size_t>(num_intersect);
size_t total_ct = 0;
// then, for each v in the intersection
for (size_t i=0; i < num_intersect; ++i) {
base[k_idx] = lst_intersect[i]; //if par here, must duplicate base
total_ct += KCliqueDir_rec(DG, k_idx+1, k, base);
}
// TODO leave this for now, unroll loop + reuse base in that -- write a program to generate loop unrolling
//auto count_seq = pbbslib::make_sequence<size_t>(counts, active_size);
//size_t count = pbbslib::reduce_add(count_seq);
return total_ct;
}
template <class Graph>
inline size_t KCliqueDir(Graph& DG, size_t k) {
// TODO divide work -- statically or by estimating prefix sum stuff
auto tots = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n,[&] (size_t i) {
//for (size_t i = 0; i < DG.n; ++i) {
auto base_idxs = sequence<uintE>::no_init(k);
base_idxs[0] = i;
tots[i] = KCliqueDir_rec(DG, 1, k, base_idxs);
});
return pbbslib::reduce_add(tots);
}
// base must have space for k if count_only = false
// Recursive induced-subgraph k-clique search with dynamically allocated
// per-level candidate arrays.
// DG: directed (rank-oriented) graph; k_idx: recursion depth (vertices fixed
// so far); k: target depth; induced/num_intersect: candidates adjacent to
// every fixed vertex; lstintersect_sub: intersection-kernel selector;
// base: partially built clique, maintained only when count_only is false
// (must have space for k entries, see caller comment); g_f: callback on
// completed cliques. Returns the clique count rooted at the current prefix.
template <class Graph, class F, class G>
inline size_t KCliqueIndDir_rec(Graph& DG, size_t k_idx, size_t k, uintE* induced, size_t num_intersect, F lstintersect_sub,
sequence<uintE> base, G g_f, bool count_only = true) {
// Base case: all k levels fixed; each remaining candidate completes a clique.
// NOTE(review): g_f is invoked once on the current base here rather than once
// per completing vertex -- confirm listing callers expect this.
if (k_idx == k) {
g_f(base);
return num_intersect;
}
//auto counts = sequence<size_t>(num_intersect);
// then, for each v in the intersection
// optimization if counting and not listing
// Last level, counting only: the intersection sizes are the answer, so
// evaluate all candidates in parallel without materializing child arrays.
if (k_idx + 1 == k && count_only) {
auto counts = sequence<size_t>::no_init(num_intersect);
parallel_for (0, num_intersect, [&] (size_t i) {
auto tup = lstintersect(lstintersect_sub, DG, induced[i], induced, num_intersect, false);
counts[i] = std::get<1>(tup);
});
return pbbslib::reduce_add(counts);
}
// General case: sequentially extend the prefix by each candidate.
size_t total_ct = 0;
for (size_t i=0; i < num_intersect; ++i) {
if (!count_only) base[k_idx] = induced[i];
auto new_induced_tup = lstintersect(lstintersect_sub, DG, induced[i], induced, num_intersect, true);
auto new_induced = std::get<0>(new_induced_tup);
auto new_induced_size = std::get<1>(new_induced_tup);
// NOTE(review): if lstintersect allocated an array but produced size 0,
// nothing frees it on this path -- confirm it returns no allocation (or
// null) for empty results, otherwise this leaks.
if (new_induced_size > 0) {
total_ct += KCliqueIndDir_rec(DG, k_idx+1, k, new_induced, new_induced_size, lstintersect_sub, base, g_f, count_only);
pbbs::delete_array<uintE>(new_induced, new_induced_size);
}
}
//auto count_seq = pbbslib::make_sequence<size_t>(counts, active_size);
//size_t count = pbbslib::reduce_add(count_seq);
return total_ct;
}
//TODO del array in ind gen dyn
// Driver for the induced-subgraph k-clique search with dynamically allocated
// candidate arrays: roots one recursion at every vertex's directed
// out-neighborhood and sums the per-root counts. When count_only is false,
// `base` records the growing clique and g_f is called per completed clique.
template <class Graph, class F, class G>
inline size_t KCliqueIndDir(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
  // TODO divide work -- statically or by estimating prefix sum stuff
  auto per_root = sequence<size_t>::no_init(DG.n);
  parallel_for (0, DG.n, [&] (size_t v) {
    auto deg = DG.get_vertex(v).getOutDegree();
    if (deg == 0) {
      per_root[v] = 0;
      return;
    }
    sequence<uintE> base = sequence<uintE>();
    if (!count_only) {
      base = sequence<uintE>::no_init(k);
      base[0] = v;
    }
    per_root[v] = KCliqueIndDir_rec(DG, 1, k, (uintE*)(DG.get_vertex(v).getOutNeighbors()), deg, lstintersect_sub, base, g_f, count_only);
  });
  return pbbslib::reduce_add(per_root);
}
// Stack-buffer variant of KCliqueIndDir_rec: each recursion level keeps its
// candidate array in a fixed-size on-stack buffer (INDUCED_STACK_THR), so no
// heap allocation happens on the search path. Same contract as
// KCliqueIndDir_rec; callers must guarantee all degrees fit the buffer.
template <class Graph, class F, class G>
inline size_t KCliqueIndDir_stack_rec(Graph& DG, size_t k_idx, size_t k, uintE* induced, size_t induced_size, F lstintersect_sub,
sequence<uintE> base, G g_f, bool count_only = true) {
// Base case: every remaining candidate completes a clique.
if (k_idx == k) {
g_f(base);
return induced_size;
}
// then, for each v in the intersection
// Scratch buffer for the next level's candidates, reused (overwritten)
// across loop iterations -- safe because the recursion below returns before
// the next iteration writes it.
uintE induced_ptr[INDUCED_STACK_THR];
// optimization if counting and not listing
// Last level, counting only: sizes alone suffice; save=false, so the shared
// induced_ptr buffer is not written by the concurrent iterations.
if (k_idx + 1 == k && count_only) {
auto counts = sequence<size_t>::no_init(induced_size);
parallel_for (0, induced_size, [&] (size_t i) {
counts[i] = std::get<1>(lstintersect(lstintersect_sub, DG, induced[i], induced, induced_size, false, induced_ptr));
});
return pbbslib::reduce_add(counts);
}
// General case: extend the prefix by each candidate sequentially.
size_t total_ct = 0;
for (size_t i=0; i < induced_size; ++i) {
if (!count_only) base[k_idx] = induced[i];
auto new_induced_size = std::get<1>(lstintersect(lstintersect_sub, DG, induced[i], induced, induced_size, true, induced_ptr));
//if (new_induced_size > 0) assert((induced_ptr + (k_idx * INDUCED_STACK_THR))[0] < DG.n);
if (new_induced_size > 0) total_ct += KCliqueIndDir_stack_rec(DG, k_idx+1, k, induced_ptr, new_induced_size, lstintersect_sub, base, g_f, count_only);
}
//auto count_seq = pbbslib::make_sequence<size_t>(counts, active_size);
//size_t count = pbbslib::reduce_add(count_seq);
return total_ct;
}
// Driver for the stack-buffer k-clique search: verifies every out-degree
// fits in the fixed INDUCED_STACK_THR scratch arrays, then roots one
// recursion per vertex and sums the counts.
template <class Graph, class F, class G>
inline size_t KCliqueIndDir_stack(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
  auto idxs = sequence<size_t>::no_init(DG.n);
  parallel_for (0, DG.n, [&] (size_t i) { idxs[i] = DG.get_vertex(i).getOutDegree(); });
  // Fix: pbbslib::reduce passes element VALUES to the monoid op; the old
  // lambda re-indexed `idxs` with those values (idxs[i] where i was already a
  // degree), yielding a bogus maximum. Take the max of the values directly.
  auto max_f = [](size_t a, size_t b) -> size_t { return a > b ? a : b; };
  size_t max_deg = pbbslib::reduce(idxs, pbbslib::make_monoid(max_f, 0));
  assert (max_deg <= INDUCED_STACK_THR);
  // TODO divide work -- statically or by estimating prefix sum stuff
  auto tots = sequence<size_t>::no_init(DG.n);
  parallel_for (0, DG.n, [&] (size_t i) {
    if (DG.get_vertex(i).getOutDegree() == 0) {
      tots[i] = 0;
    } else {
      sequence<uintE> base = sequence<uintE>();
      if (!count_only) {
        base = sequence<uintE>::no_init(k);
        base[0] = i;
      }
      // Seed the recursion with i's out-neighborhood copied into a stack buffer.
      uintE induced_ptr[INDUCED_STACK_THR];
      for (size_t j=0; j < DG.get_vertex(i).getOutDegree(); ++j) {
        induced_ptr[j] = DG.get_vertex(i).getOutNeighbor(j);
      }
      tots[i] = KCliqueIndDir_stack_rec(DG, 1, k, induced_ptr, DG.get_vertex(i).getOutDegree(), lstintersect_sub, base, g_f, count_only);
    }
  });
  return pbbslib::reduce_add(tots);
}
// Preallocated-block variant of KCliqueIndDir_rec: the caller hands one large
// buffer and each recursion level writes its candidates `granularity` entries
// past its parent's slice (so k levels need k * granularity <= buffer size,
// asserted by the caller). Same contract as KCliqueIndDir_rec otherwise.
template <class Graph, class F, class G>
inline size_t KCliqueIndDir_alloc_rec(Graph& DG, size_t k_idx, size_t k, uintE* induced, size_t granularity, size_t induced_size, F lstintersect_sub,
sequence<uintE> base, G g_f, bool count_only = true) {
// Base case: every remaining candidate completes a clique.
if (k_idx == k) {
g_f(base);
return induced_size;
}
// then, for each v in the intersection
// This level's output slice: one granularity-stride past the input slice.
auto new_induced = induced + granularity;
// optimization if counting and not listing
// Last level, counting only: sizes suffice; save=false leaves new_induced
// untouched, so the parallel iterations do not race on it.
if (k_idx + 1 == k && count_only) {
auto counts = sequence<size_t>::no_init(induced_size);
parallel_for (0, induced_size, [&] (size_t i) {
counts[i] = std::get<1>(lstintersect(lstintersect_sub, DG, induced[i], induced, induced_size, false, new_induced));
});
return pbbslib::reduce_add(counts);
}
// General case: extend the prefix by each candidate sequentially; the
// `>= k - k_idx` test prunes branches that cannot reach depth k.
size_t total_ct = 0;
for (size_t i=0; i < induced_size; ++i) {
if (!count_only) base[k_idx] = induced[i];
auto new_induced_size = std::get<1>(lstintersect(lstintersect_sub, DG, induced[i], induced, induced_size, true, new_induced));
//if (new_induced_size > 0) assert((induced_ptr + (k_idx * INDUCED_STACK_THR))[0] < DG.n);
if (new_induced_size >= k - k_idx) total_ct += KCliqueIndDir_alloc_rec(DG, k_idx+1, k, new_induced, granularity, new_induced_size, lstintersect_sub, base, g_f, count_only);
}
//auto count_seq = pbbslib::make_sequence<size_t>(counts, active_size);
//size_t count = pbbslib::reduce_add(count_seq);
return total_ct;
}
// TODO prune all vert with deg < k
// Driver for the preallocated-block k-clique search: each worker grabs one
// INDUCED_STACK_THR-sized block from a list allocator and the recursion
// carves it into per-level slices (stride = the root's out-degree).
// Vertices of out-degree < k are pruned up front.
template <class Graph, class F, class G>
inline size_t KCliqueIndDir_alloc(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
  auto idxs = sequence<size_t>::no_init(DG.n);
  parallel_for (0, DG.n, [&] (size_t i) { idxs[i] = DG.get_vertex(i).getOutDegree(); });
  // Fix: pbbslib::reduce passes element VALUES to the monoid op; the old
  // lambda re-indexed `idxs` with those values, computing a meaningless
  // maximum. Take the max of the values directly.
  auto max_f = [](size_t a, size_t b) -> size_t { return a > b ? a : b; };
  size_t max_deg = pbbslib::reduce(idxs, pbbslib::make_monoid(max_f, 0));
  // k recursion levels, each striding by at most max_deg, must fit one block.
  assert (k * max_deg <= INDUCED_STACK_THR);
  using induced_alloc = list_allocator<uintE[INDUCED_STACK_THR]>;
  induced_alloc::init();
  // TODO divide work -- statically or by estimating prefix sum stuff
  auto tots = sequence<size_t>::no_init(DG.n);
  parallel_for (0, DG.n, [&] (size_t i) {
    if (DG.get_vertex(i).getOutDegree() < k) {
      tots[i] = 0;
    } else {
      auto induced_ptr = induced_alloc::alloc();
      sequence<uintE> base = sequence<uintE>();
      if (!count_only) {
        base = sequence<uintE>::no_init(k);
        base[0] = i;
      }
      // Seed the block's first slice with i's out-neighborhood.
      for (size_t j=0; j < DG.get_vertex(i).getOutDegree(); ++j) {
        (*induced_ptr)[j] = DG.get_vertex(i).getOutNeighbor(j);
      }
      auto induced_deg = DG.get_vertex(i).getOutDegree();
      tots[i] = KCliqueIndDir_alloc_rec(DG, 1, k, *induced_ptr, induced_deg, induced_deg, lstintersect_sub, base, g_f, count_only);
      induced_alloc::free(induced_ptr);
    }
  });
  induced_alloc::finish();
  return pbbslib::reduce_add(tots);
}
//GENERATED
// TODO keep array of size order alpha per processor???
// Dispatcher for the generated (loop-unrolled) induced k-clique counters:
// picks the stack-buffer variant when the largest out-degree fits
// INDUCED_STACK_THR, and the dynamically allocating variant otherwise.
template <class Graph, class F, class G>
inline size_t KCliqueIndGenDir(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
  auto base_idxs = sequence<size_t>::no_init(DG.n);
  parallel_for (0, DG.n, [&] (size_t i) { base_idxs[i] = DG.get_vertex(i).getOutDegree(); });
  // Fix: pbbslib::reduce passes element VALUES to the monoid op; the old
  // lambda re-indexed `base_idxs` with those values, so the dispatch
  // threshold was computed from garbage. Take the max of the values.
  auto max_f = [](size_t a, size_t b) -> size_t { return a > b ? a : b; };
  size_t max_deg = pbbslib::reduce(base_idxs, pbbslib::make_monoid(max_f, 0));
  if (max_deg <= INDUCED_STACK_THR) return KCliqueIndGenDir_stack(DG, k, lstintersect_sub, g_f, count_only);
  return KCliqueIndGenDir_dyn(DG, k, lstintersect_sub, g_f, count_only);
}
// using stack
// Loop-unrolled ("generated") induced k-clique counting where every
// per-level scratch array is a fixed-size stack buffer (INDUCED_STACK_THR).
// Specialized parallel nests for k = 2, 3, 4; larger k unrolls four levels
// and finishes with the recursive KCliqueIndDir_rec. Counting-only paths
// skip materializing the last level; listing paths build `base` and call g_f
// per completed clique.
// NOTE(review): per-level counts are accumulated in uintE-typed stack
// buffers (ptr_storeb etc.) before reduce_add -- for graphs with many
// cliques this can overflow 32-bit uintE; the non-generated variants use
// size_t buffers. Confirm whether size_t is needed here too.
template <class Graph, class F, class G>
inline size_t KCliqueIndGenDir_stack(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
switch (k) {
case 2: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
// The directed out-neighborhood of a seeds the search.
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
uintE ptr_storeb[INDUCED_STACK_THR];
auto storeb = pbbslib::make_sequence<uintE>(ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
if (count_only) {
// Counting only: the intersection size is the answer; nothing saved.
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, false);
sizeb = std::get<1>(tupleb);
} else {
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, ptr_inducedb);
sizeb = std::get<1>(tupleb);
auto inducedb = std::get<0>(tupleb);
if (sizeb >= k - 1) {
uintE ptr_base[INDUCED_STACK_THR];
auto base = pbbslib::make_sequence<uintE>(ptr_base, k);
base[0] = a;
base[1] = induceda[b];
for (size_t xx = 0; xx < sizeb; xx++) {
base[2] = inducedb[xx];
g_f(base);
}
}
}
storeb[b] = sizeb;
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
break; }
case 3: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
uintE ptr_storeb[INDUCED_STACK_THR];
auto storeb = pbbslib::make_sequence<uintE>(ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
// Level-2 candidates are materialized into a stack buffer.
uintE ptr_inducedb[INDUCED_STACK_THR];
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, ptr_inducedb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto inducedb = std::get<0>(tupleb);
uintE ptr_storec[INDUCED_STACK_THR];
auto storec = pbbslib::make_sequence<uintE>(ptr_storec, sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
size_t sizec = 0;
if (count_only) {
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, false);
sizec = std::get<1>(tuplec);
} else {
uintE ptr_inducedc[INDUCED_STACK_THR];
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true, ptr_inducedc);
sizec = std::get<1>(tuplec);
auto inducedc = std::get<0>(tuplec);
if (sizec >= k - 2) {
uintE ptr_base[INDUCED_STACK_THR];
auto base = pbbslib::make_sequence<uintE>(ptr_base, k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
for (size_t xx = 0; xx < sizec; xx++) {
base[3] = inducedc[xx];
g_f(base);
}
}
}
storec[c] = sizec;
});
storeb[b] = pbbslib::reduce_add(storec);} else storeb[b] = 0;
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
break; }
case 4: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
uintE ptr_storeb[INDUCED_STACK_THR];
auto storeb = pbbslib::make_sequence<uintE>(ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
uintE ptr_inducedb[INDUCED_STACK_THR];
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, ptr_inducedb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto inducedb = std::get<0>(tupleb);
uintE ptr_storec[INDUCED_STACK_THR];
auto storec = pbbslib::make_sequence<uintE>(ptr_storec, sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
size_t sizec = 0;
uintE ptr_inducedc[INDUCED_STACK_THR];
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true, ptr_inducedc);
sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto inducedc = std::get<0>(tuplec);
uintE ptr_stored[INDUCED_STACK_THR];
auto stored = pbbslib::make_sequence<uintE>(ptr_stored, sizec);
parallel_for (0, sizec, [&] (size_t d) {
size_t sized = 0;
if (count_only) {
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, false);
sized = std::get<1>(tupled);
} else {
uintE ptr_inducedd[INDUCED_STACK_THR];
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, true, ptr_inducedd);
sized = std::get<1>(tupled);
auto inducedd = std::get<0>(tupled);
if (sized >= k - 3) {
uintE ptr_base[INDUCED_STACK_THR];
auto base = pbbslib::make_sequence<uintE>(ptr_base, k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
base[3] = inducedc[d];
for (size_t xx = 0; xx < sized; xx++) {
base[4] = inducedd[xx];
g_f(base);
}
}
}
stored[d] = sized;
});
storec[c] = pbbslib::reduce_add(stored);} else storec[c] = 0;
});
storeb[b] = pbbslib::reduce_add(storec);} else storeb[b] = 0;
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
break; }
default:
// k > 4: unroll four levels, then recurse via KCliqueIndDir_rec.
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
uintE ptr_storeb[INDUCED_STACK_THR];
auto storeb = pbbslib::make_sequence<uintE>(ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
uintE ptr_inducedb[INDUCED_STACK_THR];
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, ptr_inducedb);
size_t sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto inducedb = std::get<0>(tupleb);
uintE ptr_storec[INDUCED_STACK_THR];
auto storec = pbbslib::make_sequence<uintE>(ptr_storec, sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
uintE ptr_inducedc[INDUCED_STACK_THR];
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true, ptr_inducedc);
size_t sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto inducedc = std::get<0>(tuplec);
uintE ptr_stored[INDUCED_STACK_THR];
auto stored = pbbslib::make_sequence<uintE>(ptr_stored, sizec);
parallel_for (0, sizec, [&] (size_t d) {
uintE ptr_inducedd[INDUCED_STACK_THR];
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, true, ptr_inducedd);
auto inducedd = std::get<0>(tupled);
size_t sized = std::get<1>(tupled);
if (sized >= k - 3) {
auto base = sequence<uintE>();
if (!count_only) {
uintE ptr_base[INDUCED_STACK_THR];
// NOTE(review): this wraps a stack array in a sequence (not
// make_sequence as elsewhere) -- verify this constructor does not
// take ownership and later attempt to free ptr_base.
base = sequence<uintE>(ptr_base, k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
base[3] = inducedc[d];
stored[d] = KCliqueIndDir_rec(DG, 4, k, inducedd, sized, lstintersect_sub, base, g_f, count_only);
} else stored[d] = KCliqueIndDir_rec(DG, 4, k, inducedd, sized, lstintersect_sub, base, g_f, count_only);} else stored[d] = 0;
});
storec[c] = pbbslib::reduce_add(stored);} else storec[c] = 0;
});
storeb[b] = pbbslib::reduce_add(storec);} else storeb[b] = 0;
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
}
}
// using list allocator
// Loop-unrolled induced k-clique counting where every per-level scratch
// array is an INDUCED_STACK_THR-sized block drawn from a list_allocator
// pool instead of the stack. Structure mirrors KCliqueIndGenDir_stack:
// specialized nests for k = 2, 3, 4, recursive tail for larger k. Each
// alloc() is paired with a free() on the same control path, and
// induced_alloc::finish() tears the pool down before the final reduction.
// NOTE(review): as in the stack variant, per-level counts are stored in
// uintE-typed blocks before reduce_add -- confirm 32 bits cannot overflow
// for the intended graph sizes.
template <class Graph, class F, class G>
inline size_t KCliqueIndGenDir_alloc(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
using induced_alloc = list_allocator<uintE[INDUCED_STACK_THR]>;
induced_alloc::init();
switch (k) {
case 2: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
// The directed out-neighborhood of a seeds the search.
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto ptr_storeb = induced_alloc::alloc();
auto storeb = pbbslib::make_sequence<uintE>(*ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
if (count_only) {
// Counting only: intersection size is the answer; nothing saved.
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, false);
sizeb = std::get<1>(tupleb);
} else {
auto ptr_inducedb = induced_alloc::alloc();
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, *ptr_inducedb);
sizeb = std::get<1>(tupleb);
auto inducedb = std::get<0>(tupleb);
if (sizeb >= k - 1) {
auto ptr_base = induced_alloc::alloc();
auto base = pbbslib::make_sequence<uintE>(*ptr_base, k);
base[0] = a;
base[1] = induceda[b];
for (size_t xx = 0; xx < sizeb; xx++) {
base[2] = inducedb[xx];
g_f(base);
}
induced_alloc::free(ptr_base);
}
induced_alloc::free(ptr_inducedb);
}
storeb[b] = sizeb;
});
storea[a] = pbbslib::reduce_add(storeb);
induced_alloc::free(ptr_storeb);
} else storea[a] = 0;
});
induced_alloc::finish();
return pbbslib::reduce_add(storea);
break; }
case 3: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto ptr_storeb = induced_alloc::alloc();
auto storeb = pbbslib::make_sequence<uintE>(*ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
auto ptr_inducedb = induced_alloc::alloc();
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, *ptr_inducedb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto inducedb = std::get<0>(tupleb);
auto ptr_storec = induced_alloc::alloc();
auto storec = pbbslib::make_sequence<uintE>(*ptr_storec, sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
size_t sizec = 0;
if (count_only) {
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, false);
sizec = std::get<1>(tuplec);
} else {
auto ptr_inducedc = induced_alloc::alloc();
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true, *ptr_inducedc);
sizec = std::get<1>(tuplec);
auto inducedc = std::get<0>(tuplec);
if (sizec >= k - 2) {
auto ptr_base = induced_alloc::alloc();
auto base = pbbslib::make_sequence<uintE>(*ptr_base, k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
for (size_t xx = 0; xx < sizec; xx++) {
base[3] = inducedc[xx];
g_f(base);
}
induced_alloc::free(ptr_base);
}
induced_alloc::free(ptr_inducedc);
}
storec[c] = sizec;
});
storeb[b] = pbbslib::reduce_add(storec);
induced_alloc::free(ptr_storec);} else storeb[b] = 0;
induced_alloc::free(ptr_inducedb);
});
storea[a] = pbbslib::reduce_add(storeb);
induced_alloc::free(ptr_storeb);
} else storea[a] = 0;
});
induced_alloc::finish();
return pbbslib::reduce_add(storea);
break; }
case 4: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto ptr_storeb = induced_alloc::alloc();
auto storeb = pbbslib::make_sequence<uintE>(*ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
auto ptr_inducedb = induced_alloc::alloc();
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, *ptr_inducedb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto inducedb = std::get<0>(tupleb);
auto ptr_storec = induced_alloc::alloc();
auto storec = pbbslib::make_sequence<uintE>(*ptr_storec, sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
size_t sizec = 0;
auto ptr_inducedc = induced_alloc::alloc();
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true, *ptr_inducedc);
sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto inducedc = std::get<0>(tuplec);
auto ptr_stored = induced_alloc::alloc();
auto stored = pbbslib::make_sequence<uintE>(*ptr_stored, sizec);
parallel_for (0, sizec, [&] (size_t d) {
size_t sized = 0;
if (count_only) {
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, false);
sized = std::get<1>(tupled);
} else {
auto ptr_inducedd = induced_alloc::alloc();
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, true, *ptr_inducedd);
sized = std::get<1>(tupled);
auto inducedd = std::get<0>(tupled);
if (sized >= k - 3) {
auto ptr_base = induced_alloc::alloc();
auto base = pbbslib::make_sequence<uintE>(*ptr_base, k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
base[3] = inducedc[d];
for (size_t xx = 0; xx < sized; xx++) {
base[4] = inducedd[xx];
g_f(base);
}
induced_alloc::free(ptr_base);
}
induced_alloc::free(ptr_inducedd);
}
stored[d] = sized;
});
storec[c] = pbbslib::reduce_add(stored);
induced_alloc::free(ptr_stored);} else storec[c] = 0;
induced_alloc::free(ptr_inducedc);
});
storeb[b] = pbbslib::reduce_add(storec);
induced_alloc::free(ptr_storec);} else storeb[b] = 0;
induced_alloc::free(ptr_inducedb);
});
storea[a] = pbbslib::reduce_add(storeb);
induced_alloc::free(ptr_storeb);
} else storea[a] = 0;
});
induced_alloc::finish();
return pbbslib::reduce_add(storea);
break; }
default:
// k > 4: unroll four levels, then recurse via KCliqueIndDir_rec.
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto ptr_storeb = induced_alloc::alloc();
auto storeb = pbbslib::make_sequence<uintE>(*ptr_storeb, sizea);
parallel_for (0, sizea, [&] (size_t b) {
auto ptr_inducedb = induced_alloc::alloc();
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true, *ptr_inducedb);
size_t sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto inducedb = std::get<0>(tupleb);
auto ptr_storec = induced_alloc::alloc();
auto storec = pbbslib::make_sequence<uintE>(*ptr_storec, sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
auto ptr_inducedc = induced_alloc::alloc();
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true, *ptr_inducedc);
size_t sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto inducedc = std::get<0>(tuplec);
auto ptr_stored = induced_alloc::alloc();
auto stored = pbbslib::make_sequence<uintE>(*ptr_stored, sizec);
parallel_for (0, sizec, [&] (size_t d) {
auto ptr_inducedd = induced_alloc::alloc();
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, true, *ptr_inducedd);
auto inducedd = std::get<0>(tupled);
size_t sized = std::get<1>(tupled);
if (sized >= k - 3) {
auto base = sequence<uintE>();
if (!count_only) {
auto ptr_base = induced_alloc::alloc();
// NOTE(review): this wraps an allocator block in a sequence (not
// make_sequence as elsewhere) and then frees the block separately --
// verify this constructor does not take ownership (double free risk).
base = sequence<uintE>(*ptr_base, k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
base[3] = inducedc[d];
stored[d] = KCliqueIndDir_rec(DG, 4, k, inducedd, sized, lstintersect_sub, base, g_f, count_only);
induced_alloc::free(ptr_base); }
else stored[d] = KCliqueIndDir_rec(DG, 4, k, inducedd, sized, lstintersect_sub, base, g_f, count_only);} else stored[d] = 0;
});
storec[c] = pbbslib::reduce_add(stored);
induced_alloc::free(ptr_stored);} else storec[c] = 0;
induced_alloc::free(ptr_inducedc);
});
storeb[b] = pbbslib::reduce_add(storec);
induced_alloc::free(ptr_storec);} else storeb[b] = 0;
induced_alloc::free(ptr_inducedb);
});
storea[a] = pbbslib::reduce_add(storeb);
induced_alloc::free(ptr_storeb);
} else storea[a] = 0;
});
induced_alloc::finish();
return pbbslib::reduce_add(storea);
}
}
template <class Graph, class F, class G>
inline size_t KCliqueIndGenDir_dyn(Graph& DG, size_t k, F lstintersect_sub, G g_f, bool count_only = true) {
switch (k) {
case 2: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto storeb = sequence<size_t>::no_init(sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
if (count_only) {
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, false);
sizeb = std::get<1>(tupleb);
} else {
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true);
auto inducedb = std::get<0>(tupleb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto base = sequence<uintE>::no_init(k);
base[0] = a;
base[1] = induceda[b];
for (size_t xx = 0; xx < sizeb; xx++) {
base[2] = inducedb[xx];
g_f(base);
}
}
if (inducedb) pbbs::delete_array<uintE>(inducedb, sizeb);
}
storeb[b] = sizeb;
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
break; }
case 3: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto storeb = sequence<size_t>::no_init(sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true);
auto inducedb = std::get<0>(tupleb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto storec = sequence<size_t>::no_init(sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
size_t sizec = 0;
if (count_only) {
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, false);
sizec = std::get<1>(tuplec);
} else {
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true);
auto inducedc = std::get<0>(tuplec);
sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto base = sequence<uintE>::no_init(k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
for (size_t xx = 0; xx < sizec; xx++) {
base[3] = inducedc[xx];
g_f(base);
}
}
if (inducedc) pbbs::delete_array<uintE>(inducedc, sizec);
}
storec[c] = sizec;
});
storeb[b] = pbbslib::reduce_add(storec);} else storeb[b] = 0;
if (inducedb) pbbs::delete_array<uintE>(inducedb, sizeb);
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
break; }
case 4: {
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto storeb = sequence<size_t>::no_init(sizea);
parallel_for (0, sizea, [&] (size_t b) {
size_t sizeb = 0;
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true);
auto inducedb = std::get<0>(tupleb);
sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto storec = sequence<size_t>::no_init(sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
size_t sizec = 0;
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true);
auto inducedc = std::get<0>(tuplec);
sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto stored = sequence<size_t>::no_init(sizec);
parallel_for (0, sizec, [&] (size_t d) {
size_t sized = 0;
if (count_only) {
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, false);
sized = std::get<1>(tupled);
} else {
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, true);
auto inducedd = std::get<0>(tupled);
sized = std::get<1>(tupled);
if (sized >= k - 3) {
auto base = sequence<uintE>::no_init(k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
base[3] = inducedc[d];
for (size_t xx = 0; xx < sized; xx++) {
base[4] = inducedd[xx];
g_f(base);
}
}
if (inducedd) pbbs::delete_array<uintE>(inducedd, sized);
}
stored[d] = sized;
});
storec[c] = pbbslib::reduce_add(stored);} else storec[c] = 0;
if (inducedc) pbbs::delete_array<uintE>(inducedc, sizec);
});
storeb[b] = pbbslib::reduce_add(storec);} else storeb[b] = 0;
if (inducedb) pbbs::delete_array<uintE>(inducedb, sizeb);
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
break; }
default:
auto storea = sequence<size_t>::no_init(DG.n);
parallel_for (0, DG.n, [&] (size_t a) {
auto induceda = (uintE*)(DG.get_vertex(a).getOutNeighbors());
auto sizea = DG.get_vertex(a).getOutDegree();
if (sizea >= k) {
auto storeb = sequence<size_t>::no_init(sizea);
parallel_for (0, sizea, [&] (size_t b) {
auto tupleb = lstintersect(lstintersect_sub, DG, induceda[b], induceda, sizea, true);
auto inducedb = std::get<0>(tupleb);
size_t sizeb = std::get<1>(tupleb);
if (sizeb >= k - 1) {
auto storec = sequence<size_t>::no_init(sizeb);
parallel_for (0, sizeb, [&] (size_t c) {
auto tuplec = lstintersect(lstintersect_sub, DG, inducedb[c], inducedb, sizeb, true);
auto inducedc = std::get<0>(tuplec);
size_t sizec = std::get<1>(tuplec);
if (sizec >= k - 2) {
auto stored = sequence<size_t>::no_init(sizec);
parallel_for (0, sizec, [&] (size_t d) {
auto tupled = lstintersect(lstintersect_sub, DG, inducedc[d], inducedc, sizec, true);
auto inducedd = std::get<0>(tupled);
size_t sized = std::get<1>(tupled);
if (sized >= k - 3) {
auto base = sequence<uintE>();
if (!count_only) {
base = sequence<uintE>::no_init(k);
base[0] = a;
base[1] = induceda[b];
base[2] = inducedb[c];
base[3] = inducedc[d];
}
stored[d] = KCliqueIndDir_rec(DG, 4, k, inducedd, sized, lstintersect_sub, base, g_f, count_only);} else stored[d] = 0;
});
storec[c] = pbbslib::reduce_add(stored);} else storec[c] = 0;
if (inducedc) pbbs::delete_array<uintE>(inducedc, sizec);
});
storeb[b] = pbbslib::reduce_add(storec);} else storeb[b] = 0;
if (inducedb) pbbs::delete_array<uintE>(inducedb, sizeb);
});
storea[a] = pbbslib::reduce_add(storeb);
} else storea[a] = 0;
});
return pbbslib::reduce_add(storea);
}
}
//size_t temp(graph<vertex<W>>& DG, size_t k) {
// auto storea = pbbslib::no_init<size_t>(DG.n);
// parallel_for (0, DG.n, [&] (size_t a) {
// auto induceda = pbbslib::make_sequence<uintE>((uintE*)(DG.V[a].getOutNeighbors()), DG.V[a].getOutDegree());
// auto storeb = pbbslib::no_init<size_t>(induceda.size());
// parallel_for (0, induceda.size(), [&] (size_t b) {
// auto inducedb = lstintersect(DG, induceda[b], induceda);
// for k = 3, store size of induced here; to be returned
// storeb[b] = inducedb.size();
// if we were recursing further, we would invoke the recursive version here
// });
// reduce storeb; store that in storea
// storea[a] = pbbslib::reduce_add(storeb);
// });
// return pbbslib::reduce_add(storea);
//}
// Ideas:
// hash table for outvert of G -- would make it technically work-efficient, to do intersection
// where should we parallelize? first level only? through? be careful of space usage
// would approx kcore be faster if we used buckets instead? instead of the sort?
*/ |
4651.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
#pragma scop
#pragma omp parallel num_threads(4)
{
#pragma omp for schedule(static, 16)
for (i = 0; i < _PB_NY; i++)
y[i] = 0;
#pragma omp for private (j) schedule(static, 16)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
for (j = 0; j < _PB_NY; j++)
y[j] = y[j] + A[i][j] * tmp[i];
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
Searching.202003021000.profile_para_top_m_search.h | //
// Created by Zhen Peng on 11/11/19.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
class Searching {
//private:
public:
idi num_v_ = 0;
edgei num_e_ = 0;
idi num_queries_ = 0;
int dimension_ = 0;
idi width_ = 0; // NSG largest degree
idi ep_ = 0; // Start point
// std::vector<dataf> data_load_;
// std::vector<dataf> queries_load_;
// std::vector< std::vector<dataf> > data_load_;
// std::vector< std::vector<dataf> > queries_load_;
// std::vector<distf> norms_;
dataf *data_load_ = nullptr;
dataf *queries_load_ = nullptr;
// dataf *norms_;
// std::vector< std::vector<idi> > nsg_graph_;
// idi *nsg_graph_indices_;
// idi *nsg_graph_out_edges_;
// std::vector< std::vector<idi> > edge_list_;
char *opt_nsg_graph_ = nullptr;
uint64_t data_bytes_;
uint64_t neighbor_bytes_;
uint64_t vertex_bytes_;
// For multithreads
int num_threads_ = 1;
dataf compute_norm(
const dataf *data) const;
// idi vertex_id);
// const std::vector<PANNS::dataf> &data);
// size_t loc_start,
// idi dimension)
dataf compute_distance_with_norm(
const dataf *v_data,
const dataf *q_data,
// idi vertex_id,
// idi query_id,
// const std::vector<dataf> &d_data,
// const std::vector<dataf> &q_data,
// PANNS::idi d_start,
// PANNS::idi q_start,
dataf vertex_norm) const;
// idi dimension)
static idi insert_into_queue(
std::vector<Candidate> &c_queue,
idi c_queue_top,
Candidate cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static idi add_into_queue(
std::vector<PANNS::Candidate> &queue,
const idi queue_start,
idi &queue_top,
const idi queue_size,
const PANNS::Candidate &cand);
static void add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_top, // The number of elements in queue, independent with queue_start
const idi queue_size); // The maximum capacity of queue, independent with queue_start.
static void insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue_base,
const idi insert_index,
const idi queue_start,
const idi queue_size);
// idi insert_into_queue_nsg(
// std::vector< Candidate > &c_queue,
// idi c_queue_top,
// Candidate cand);
static idi merge_two_queues_into_1st_queue_seq_fixed(
std::vector<Candidate> &queue1,
const idi queue1_start,
const idi queue1_size,
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
static void merge_two_queues_into_1st_queue_seq_incr(
std::vector<Candidate> &queue1,
const idi queue1_start,
idi &queue1_size, // The number of element in queue1, independent with queue1_start.
const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
std::vector<Candidate> &queue2,
const idi queue2_start,
const idi queue2_size);
idi merge_all_queues_para_list(
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
std::vector<Candidate> &set_L,
const idi L);
idi merge_all_queues_para_array(
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
std::vector<Candidate> &set_L,
const idi L);
public:
// For Profiling
// L3CacheMissRate cache_miss_kernel;
uint64_t count_distance_computation_ = 0;
distf dist_min_ = 0;
distf dist_max_ = 0;
// L3CacheMissRate profile_miss_rate;
~Searching()
{
free(data_load_);
data_load_ = nullptr;
// free(queries_load_);
// _mm_free(data_load_);
free(queries_load_);
queries_load_ = nullptr;
// free(norms_);
// free(nsg_graph_indices_);
// free(nsg_graph_out_edges_);
free(opt_nsg_graph_);
opt_nsg_graph_ = nullptr;
}
void load_data_load(char *filename);
void load_queries_load(char *filename);
void load_nsg_graph(char *filename);
// void build_opt_graph();
void prepare_init_ids(
std::vector<unsigned> &init_ids,
unsigned L) const;
// void prepare_candidate_queue_list(
// const float *query_load,
// std::vector<std::vector<efanna2e::Neighbor> > &retset_list,
// std::vector<boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<unsigned> &init_ids,
// const boost::dynamic_bitset<> &flags,
// unsigned batch_start,
// unsigned batch_size,
// unsigned L);
// void search_in_batch(
//// const float *query_load,
// size_t K,
// size_t L,
// unsigned batch_start,
// unsigned batch_size,
// std::vector< std::vector<Candidate> > &set_L_list,
// std::vector< boost::dynamic_bitset<> > &is_visited_list,
// const std::vector<idi> &init_ids,
// const boost::dynamic_bitset<> &is_visited,
// std::vector<std::vector<idi> > &set_K_list);
void search_in_sequential(
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
// boost::dynamic_bitset<> &is_visited,
// boost::dynamic_bitset<> is_visited,
// std::vector<idi> &init_ids,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void search_in_sequential_BitVector(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void search_in_sequential_prune_neighbors(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// idi get_out_degree(idi v_id) const
// {
// if (v_id < num_v_ - 1) {
// return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id];
// } else {
// return num_e_ - nsg_graph_indices_[v_id];
// }
// }
void search_with_top_m(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// std::vector< std::vector<idi> > &top_m_list);
void search_with_top_m_myths_M(
const PANNS::idi M,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void search_with_top_m_to_get_distance_range(
const PANNS::idi M,
const PANNS::idi query_id,
// const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids);
void search_with_top_m_profile_bit_CAS(
const PANNS::idi M,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void search_with_top_m_profile_prune_neighbors(
const PANNS::idi M,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
// void search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited);
void search_with_top_m_in_batch(
PANNS::idi M,
PANNS::idi batch_start,
PANNS::idi batch_size,
PANNS::idi K,
PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list);
void para_search_with_top_m_critical_area(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void para_search_with_top_m_critical_area_no_omp(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void para_search_with_top_m_critical_area_yes_omp(
idi M,
idi query_id,
idi K,
idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void para_search_with_top_m_visited_array(
const PANNS::idi M,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
std::vector<uint8_t> &is_visited);
void para_search_with_top_m_merge_queues(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void para_search_with_top_m_queues_seq_merge(
const PANNS::idi M,
const PANNS::idi query_id,
const PANNS::idi K,
const PANNS::idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K);
void para_search_with_top_m_merge_queues_no_CAS(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length,
std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<idi> &local_queues_ends,
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
// void para_search_with_top_m_merge_queues_in_array(
void para_search_with_top_m_merge_queues_new_threshold(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited);
// std::vector<uint8_t> &is_visited);
boost::dynamic_bitset<> &is_visited);
void para_search_with_top_m_merge_queues_by_sort(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &dest_offsets,
const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L.
BitVector &is_visited);
void para_search_with_top_m_merge_queues_myths(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends, // Sizes of local queue
BitVector &is_visited);
// std::vector<uint8_t> &is_visited);
// boost::dynamic_bitset<> &is_visited);
// void para_prepare_init_ids(
// std::vector<unsigned> &init_ids,
// unsigned L) const;
void para_search_with_top_m_in_batch_embarassing_para(
const PANNS::idi M,
const PANNS::idi batch_start,
const PANNS::idi batch_size,
const PANNS::idi K,
const PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list);
void load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list);
void get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
* Input the data from the file.
* @param filename
*/
inline void Searching::load_data_load(char *filename)
{
auto old_d = dimension_;
DiskIO::load_data(
filename,
data_load_,
num_v_,
dimension_);
if (old_d) {
if (old_d != dimension_) {
std::cerr << "Error: data dimension " << dimension_
<< " is not equal to query dimension " << old_d << "." << std::endl;
exit(EXIT_FAILURE);
}
}
}
/**
* Input queries from the file.
* @param filename
*/
inline void Searching::load_queries_load(char *filename)
{
auto old_d = dimension_;
DiskIO::load_data(
filename,
queries_load_,
num_queries_,
dimension_);
if (old_d) {
if (old_d != dimension_) {
std::cerr << "Error: query dimension " << dimension_
<< " is not equal to data dimension " << old_d << "." << std::endl;
exit(EXIT_FAILURE);
}
}
}
/**
* Input the NSG graph from the file.
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
* @param filename
*/
inline void Searching::load_nsg_graph(char *filename)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
exit(EXIT_FAILURE);
}
fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));
data_bytes_ = (1 + dimension_) * sizeof(dataf);
neighbor_bytes_ = (1 + width_) * sizeof(idi);
vertex_bytes_ = data_bytes_ + neighbor_bytes_;
opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
if (!opt_nsg_graph_) {
std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
exit(EXIT_FAILURE);
}
idi v_id = 0;
num_e_ = 0;
char *base_location = opt_nsg_graph_;
while (true) {
idi degree;
fin.read(reinterpret_cast<char *>(°ree), sizeof(unsigned));
if (fin.eof()) {
break;
}
num_e_ += degree;
// std::vector<idi> tmp_ngbrs(degree);
// fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));
// Norm and data
distf norm = compute_norm(data_load_ + v_id * dimension_);
// distf norm = compute_norm(v_id);
std::memcpy(base_location, &norm, sizeof(distf)); // Norm
memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
base_location += data_bytes_;
// Neighbors
memcpy(base_location, °ree, sizeof(idi)); // Number of neighbors
fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
// memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
base_location += neighbor_bytes_;
++v_id;
}
if (v_id != num_v_) {
std::cerr << "Error: NSG data has " << v_id
<< " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
exit(EXIT_FAILURE);
}
free(data_load_);
data_load_ = nullptr;
// ////////////////////////
// idi v_id = 0;
// num_e_ = 0;
// while (true) {
// idi degree;
// fin.read(reinterpret_cast<char *>(°ree), sizeof(unsigned));
// if (fin.eof()) {
// break;
// }
// num_e_ += degree;
//
// std::vector<idi> ngbrs(degree);
// fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
//// nsg_graph_.push_back(ngbrs);
//// tmp_edge_list.push_back(ngbrs);
// edge_list_.push_back(ngbrs);
// ++v_id;
// }
// if (v_id != num_v_) {
// std::cerr << "Error: NSG data has " << v_id
// << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
// exit(EXIT_FAILURE);
// }
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
inline void Searching::get_recall_for_all_queries(
const std::vector< std::vector<idi> > &true_nn_list,
const std::vector<std::vector<unsigned>> &set_K_list,
std::unordered_map<unsigned, double> &recalls) const
{
// if (t_K < 100) {
// fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
// exit(EXIT_FAILURE);
// }
if (true_nn_list[0].size() < 100) {
fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
exit(EXIT_FAILURE);
}
recalls[1] = 0.0;
recalls[5] = 0.0;
recalls[10] = 0.0;
recalls[20] = 0.0;
recalls[50] = 0.0;
recalls[100] = 0.0;
for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned top_i = 0; top_i < 100; ++top_i) {
unsigned true_id = true_nn_list[q_i][top_i];
for (unsigned n_i = 0; n_i < 100; ++n_i) {
if (set_K_list[q_i][n_i] == true_id) {
if (n_i < 1) recalls[1] += 1;
if (n_i < 5) recalls[5] += 1;
if (n_i < 10) recalls[10] += 1;
if (n_i < 20) recalls[20] += 1;
if (n_i < 50) recalls[50] += 1;
if (n_i < 100) recalls[100] += 1;
}
}
}
}
recalls[1] /= 1.0 * num_queries_;
recalls[5] /= 5.0 * num_queries_;
recalls[10] /= 10.0 * num_queries_;
recalls[20] /= 20.0 * num_queries_;
recalls[50] /= 50.0 * num_queries_;
recalls[100] /= 100.0 * num_queries_;
}
inline void Searching::search_in_sequential(
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K)
{
boost::dynamic_bitset<> is_visited(num_v_);
for (idi v_i = 0; v_i < L; ++v_i) {
is_visited[init_ids[v_i]] = true;
}
const dataf *query_data = queries_load_ + query_id * dimension_;
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++count_distance_computation_;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i] = Candidate(v_id, dist, false); // False means not checked.
}
std::sort(set_L.begin(), set_L.begin() + L);
idi k = 0; // Index of every queue's first unchecked candidate.
while (k < L) {
Candidate &top_cand = set_L[k];
unsigned nk = L;
if (!top_cand.is_checked_) {
top_cand.is_checked_ = true;
idi v_id = top_cand.id_; // Vertex ID.
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
// Traverse v_id's all neighbors, pushing them into the queue
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = true;
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// Compute the distance
++count_distance_computation_;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > set_L[L-1].distance_) {
continue;
}
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
Candidate cand(nb_id, dist, false);
// Insert into the queue
idi r = insert_into_queue(set_L, L, cand);
if (r < nk) {
nk = r;
}
}
}
if (nk <= k) {
k = nk;
} else {
++k;
}
}
// cache_miss_kernel.measure_stop();
for (size_t k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i].id_;
}
}
/* Sequential greedy best-first search over the packed NSG graph layout,
 * using the project's own BitVector (atomic bit operations) as the visited
 * set instead of boost::dynamic_bitset. Only the initialization of the L
 * starting candidates and the final copy-out of the top-K results are
 * OpenMP-parallel; the main expansion loop is sequential.
 *
 * @param query_id  index of the query vector inside queries_load_.
 * @param K         number of result IDs written to set_K.
 * @param L         search-queue length (number of candidates maintained).
 * @param[in,out] set_L    candidate queue; must hold at least L elements.
 * @param init_ids  the L initial candidate vertex IDs.
 * @param[out] set_K       receives the IDs of the K best candidates.
 *
 * NOTE(review): unlike the sibling search routines, this variant does not
 * increment count_distance_computation_ — presumably to avoid a data race
 * or overhead; confirm that is intentional. */
inline void Searching::search_in_sequential_BitVector(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    // boost::dynamic_bitset<> is_visited(num_v_);
    BitVector is_visited(num_v_);

    // Mark all initial candidates visited (atomic bits allow the parallel loop).
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        // is_visited[init_ids[v_i]] = true;
        is_visited.atomic_set_bit(init_ids[v_i]);
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for every initial candidate's vertex record.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Each vertex record starts with its precomputed norm, then the data.
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    idi k = 0; // Index of every queue's first unchecked candidate.
    // Best-first expansion: repeatedly check the first unchecked candidate,
    // inserting improving neighbors; k retreats to the lowest insert position.
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L;    // Lowest queue index modified this round.
        if (!top_cand.is_checked_) {
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // The adjacency list is stored after the vertex data; first word
            // is the out-degree, followed by the neighbor IDs.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                // if (is_visited[nb_id]) {
                //     continue;
                // }
                // is_visited[nb_id] = true;
                {// Self-defined BitVector
                    if (is_visited.atomic_is_bit_set(nb_id)) {
                        continue;
                    }
                    is_visited.atomic_set_bit(nb_id);
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Worse than the current worst kept candidate: skip.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                // if (dist >= set_L[L-1].distance_) {
                //     continue;
                // }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        // Jump back to the lowest inserted position, or advance.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
    // cache_miss_kernel.measure_stop();

    // Copy out the top-K result IDs.
#pragma omp parallel for
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
/* Fully sequential greedy best-first search over the packed NSG layout,
 * counting distance computations in count_distance_computation_.
 * Uses boost::dynamic_bitset for the visited set (the BitVector variant is
 * kept commented out for comparison).
 *
 * NOTE(review): despite the name, no neighbor pruning is visible in this
 * body — it is currently identical in logic to the plain sequential search;
 * confirm whether pruning was intended here.
 *
 * @param query_id  index of the query vector inside queries_load_.
 * @param K         number of result IDs written to set_K.
 * @param L         search-queue length.
 * @param[in,out] set_L    candidate queue; must hold at least L elements.
 * @param init_ids  the L initial candidate vertex IDs.
 * @param[out] set_K       receives the IDs of the K best candidates. */
inline void Searching::search_in_sequential_prune_neighbors(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);
    // BitVector is_visited(num_v_);

//#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
        // is_visited.atomic_set_bit(init_ids[v_i]);
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for every initial candidate's vertex record.
//#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Each vertex record starts with its precomputed norm, then the data.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    idi k = 0; // Index of every queue's first unchecked candidate.
    // Best-first expansion: check the first unchecked candidate, insert
    // improving neighbors; k retreats to the lowest insert position.
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L;    // Lowest queue index modified this round.
        if (!top_cand.is_checked_) {
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list after the vertex data: out-degree, then IDs.
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                // {// Self-defined BitVector
                //     if (is_visited.atomic_is_bit_set(nb_id)) {
                //         continue;
                //     }
                //     is_visited.atomic_set_bit(nb_id);
                // }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Worse than the current worst kept candidate: skip.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                // if (dist >= set_L[L-1].distance_) {
                //     continue;
                // }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        // Jump back to the lowest inserted position, or advance.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }
    // cache_miss_kernel.measure_stop();

    // Copy out the top-K result IDs.
//#pragma omp parallel for
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
/**
 * Prepare init_ids and flags, as they are constant for all queries.
 * Fills init_ids with L distinct vertex IDs: first the out-neighbors of the
 * enter point ep_ (deduplicated), then — if those are fewer than L —
 * sequential IDs starting at ep_ + 1 (mod num_v_) as a deterministic
 * replacement for random picks.
 * @param[out] init_ids  receives exactly L distinct vertex IDs.
 * @param L  number of initial candidates to produce.
 * NOTE(review): the fill loop can only terminate when L <= num_v_ — verify
 * callers never pass a larger L.
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        unsigned L) const
{
//    idi num_ngbrs = get_out_degree(ep_);
//    edgei edge_start = nsg_graph_indices_[ep_];
//    // Store ep_'s neighbors as candidates
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//        init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//    }
//    std::unordered_set<idi> visited_ids;
    boost::dynamic_bitset<> is_selected(num_v_);
    // Enter point's adjacency list: out-degree word, then neighbor IDs.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
    // Take ep_'s neighbors first, skipping duplicates.
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//        idi v_id = out_edges[tmp_l];
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
//        init_ids[tmp_l] = v_id;
        init_ids[init_ids_end++] = v_id;
//        init_ids[tmp_l] = out_edges[tmp_l];
//        visited_ids.insert(init_ids[tmp_l]);
    }

//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }

    // If ep_'s neighbors are not enough, add other random vertices
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
    while (init_ids_end < L) {
        tmp_id %= num_v_;   // Wrap around the vertex ID space.
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
//        if (visited_ids.find(id) != visited_ids.end()) {
//            continue;
//        }
        is_selected[v_id] = true;
//        visited_ids.insert(id);
        init_ids[init_ids_end++] = v_id;
//        tmp_l++;
    }
}
// TODO: re-code in AVX-512
/* Compute the squared L2 norm (sum of squares) of a float vector with AVX2,
 * processing two 8-float registers per iteration plus one tail register.
 * @param data  pointer to the vector's floats.
 * @return sum of squares of the first D floats, D = dimension_ rounded up
 *         to a multiple of 8.
 * NOTE(review): because D is rounded up, the tail load may read up to 7
 * floats past dimension_ — this assumes the vertex data buffer is padded;
 * verify against the data loader. */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
// Unaligned loads: vertex data inside the packed graph is not guaranteed
// to be 32-byte aligned.
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);

    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U;    // Dimension rounded up to a multiple of 8.
    unsigned DR = D % 16;   // Tail after 16-wide (2-register) unrolling: 0 or 8.
    unsigned DD = D - DR;   // Portion processed two registers per iteration.
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);

    // Handle the odd trailing 8 floats first, then the unrolled main loop.
    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction of the 8 partial sums.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    return result;
}
/* Compute a distance surrogate between a vertex and a query using AVX2:
 * returns -2 * dot(v, q) + ||v||^2. The query's own norm ||q||^2 is omitted
 * — it is constant per query, so the ordering of candidates is the same as
 * with the full squared Euclidean distance.
 * @param v_data       vertex floats (record's norm word already consumed).
 * @param q_data       query floats.
 * @param vertex_norm  precomputed ||v||^2 stored at the head of the record.
 * @return -2*dot + vertex_norm; may be negative, only useful for ranking.
 * NOTE(review): like compute_norm, the tail load reads up to 7 floats past
 * dimension_ — assumes both buffers are padded; verify the loaders. */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
// Unaligned loads: neither buffer is guaranteed 32-byte alignment.
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);

    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (dimension_ + 7) & ~7U;    // Dimension rounded up to a multiple of 8.
    unsigned DR = D % 16;   // Tail after 16-wide unrolling: 0 or 8.
    unsigned DD = D - DR;   // Portion processed two registers per iteration.
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);

    // Tail register first, then the unrolled main dot-product loop.
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    // Horizontal reduction of the 8 partial dot-product sums.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    result = -2 * result + vertex_norm;

    return result;
}
//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue(
// std::vector<PANNS::Candidate> &queue,
// idi &queue_top,
// const idi queue_size,
// const PANNS::Candidate &cand)
//{
// assert(queue_size > 1);
// if (0 == queue_top) {
// queue[queue_top++] = cand;
// return 0;
// } else if (1 == queue_top) {
// if (queue[0] < cand) {
// queue[queue_top++] = cand;
// return 1;
// } else {
// queue[++queue_top] = queue[0];
// queue[0] = cand;
// return 0;
// }
// }
//
// if (queue[queue_top - 1] < cand) {
// if (queue_top < queue_size) {
// queue[queue_top++] = cand;
// }
// return queue_top;
// }
//
// idi r = insert_into_queue(
// queue,
// queue_top - 1,
// cand);
//// {//test
//// printf("r: %u"
//// "queue_top: %u "
//// "queue_size: %u\n",
//// r,
//// queue_top,
//// queue_size);
//// }
// return r;
//
//// /////////////////////////////////////////////////////////////
//// // Find the insert location
//// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
//// idi insert_loc = it_loc - queue.begin();
//// if (insert_loc == queue_size) {
//// return queue_size;
//// }
////
//// // Insert
////// if (queue_top == queue_size) {
////// // If full already
////// --queue_top;
////// }
//// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
//// reinterpret_cast<char *>(queue.data() + insert_loc),
//// (queue_top - insert_loc) * sizeof(Candidate));
////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
////// queue.at(q_i) = queue.at(q_i - 1);
////// }
//// queue[insert_loc] = cand;
//// ++queue_top;
//// return insert_loc;
//}
/* Insert `cand` into the sorted prefix [0, queue_top) of `queue`.
 * Unlike insert_into_queue, this grows the prefix by one element, up to the
 * capacity `queue_size`; when the queue is already full the current worst
 * (last) element is dropped to make room.
 * @param[in,out] queue      sorted candidate queue.
 * @param[in,out] queue_top  element count; incremented on insert.
 * @param queue_size         maximum number of elements the queue may hold.
 * @param cand               candidate to insert.
 * @return insertion index, or queue_size if cand was worse than everything
 *         a full queue can keep. */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    // An empty queue: the candidate becomes the only element.
    if (queue_top == 0) {
        queue[queue_top++] = cand;
        return 0;
    }
    // Binary-search the first element that is not less than cand.
    const idi location = static_cast<idi>(
            std::lower_bound(queue.begin(),
                             queue.begin() + queue_top,
                             cand)
            - queue.begin());
    // cand ranks past the capacity: nothing to do.
    if (location == queue_size) {
        return queue_size;
    }
    // A full queue sheds its last (worst) element before shifting.
    if (queue_top == queue_size) {
        --queue_top;
    }
    // Shift the tail one slot to the right, back to front, then place cand.
    for (idi slot = queue_top; slot > location; --slot) {
        queue[slot] = queue[slot - 1];
    }
    queue[location] = cand;
    ++queue_top;
    return location;
}
/* add_into_queue variant operating on a sub-queue that begins at
 * queue_start. Inserts `cand` into the sorted range
 * [queue_start, queue_start + queue_top), growing it by one element up to
 * capacity queue_size (which is independent of queue_start); a full
 * sub-queue drops its last element to make room.
 * @param[in,out] queue      backing vector holding the sub-queue.
 * @param queue_start        offset of the sub-queue inside `queue`.
 * @param[in,out] queue_top  element count of the sub-queue.
 * @param queue_size         maximum capacity of the sub-queue.
 * @param cand               candidate to insert.
 * @return absolute insertion index, or queue_start + queue_size when cand
 *         ranks past the capacity. */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_top, // The insertion location starting from queue_start
        const idi queue_size, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    // Empty sub-queue: cand becomes its only element.
    if (queue_top == 0) {
        queue[queue_start + queue_top++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_top;
    // Binary-search the first element not less than cand (absolute index).
    const idi insert_loc = static_cast<idi>(
            std::lower_bound(queue.begin() + queue_start,
                             queue.begin() + queue_end,
                             cand)
            - queue.begin());
    // cand ranks past the sub-queue's capacity: nothing to do.
    if (insert_loc == queue_start + queue_size) {
        return queue_start + queue_size;
    }
    // A full sub-queue sheds its last (worst) element before shifting.
    if (queue_top == queue_size) {
        --queue_top;
        --queue_end;
    }
    // Shift [insert_loc, queue_end) right one slot, back to front.
    for (idi slot = queue_end; slot > insert_loc; --slot) {
        queue[slot] = queue[slot - 1];
    }
    queue[insert_loc] = cand;
    ++queue_top;
    return insert_loc;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/**
 * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move.
 * The queue is sorted ascending by distance; among equal distances, the
 * smaller vertex ID is ordered first.
 * @param[out] c_queue     sorted candidate queue.
 * @param c_queue_top      number of valid elements in c_queue.
 * @param cand             candidate to insert.
 * @return index where cand was placed, or c_queue_top if it was not inserted.
 * NOTE(review): the shift paths write index c_queue_top, so the vector needs
 * room for c_queue_top + 1 elements — verify against callers. Callers are
 * also expected to pre-filter so cand.distance_ <= the last element's
 * distance (they test against set_L[L-1] before calling).
 */
inline idi Searching::insert_into_queue(
        std::vector<PANNS::Candidate> &c_queue,
        PANNS::idi c_queue_top,
        PANNS::Candidate cand)
{
    if (c_queue[0].distance_ > cand.distance_) {
        // If the first: shift the whole queue right and put cand in front.
        memmove(reinterpret_cast<char *>(c_queue.data() + 1),
                reinterpret_cast<char *>(c_queue.data()),
                c_queue_top * sizeof(Candidate));
        c_queue[0] = cand;
        return 0;
    } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) {
        // If the last: same distance as the current tail element.
        if (c_queue[c_queue_top - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            c_queue[c_queue_top - 1] = cand;
            return c_queue_top - 1;
        } else {
            // cand loses the ID tie-break: not inserted.
            return c_queue_top;
        }
    }

    // Binary search for the first element with a strictly larger distance.
    idi left = 0;
    idi right = c_queue_top;
    while (left < right) {
        idi mid = (right - left) / 2 + left;
        if (c_queue[mid].distance_ > cand.distance_) {
            right = mid;
        } else {
            left = mid + 1;
        }
    }

    // If the distance is the same
    if (0 != left && c_queue[left - 1].distance_ != cand.distance_) {
        ;   // No tie at the insertion point: keep `left` as found.
    } else {
        // Walk left over equal-distance entries with larger IDs so the
        // ID tie-break ordering is preserved.
        while (0 != left
               && c_queue[left - 1].distance_ == cand.distance_
               && c_queue[left - 1].id_ > cand.id_) {
            // Use ID as the second metrics for ordering
            --left;
        }
    }

    // Insert to left: shift the tail right one slot and place cand.
    memmove(reinterpret_cast<char *>(c_queue.data() + left + 1),
            reinterpret_cast<char *>(c_queue.data() + left),
            (c_queue_top - left) * sizeof(Candidate));
    c_queue[left] = cand;
    return left;
}
//inline void Searching::cand_pushes_ngbrs_into_queue(
// idi cand_id,
// const dataf *query_data,
// idi L,
// idi &new_k,
// boost::dynamic_bitset<> &is_visited,
// std::vector<Candidate> &set_L)
//{
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
//}
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
// Candidate &top_cand = set_L[k];
// unsigned nk = L;
// if (!top_cand.is_checked_) {
// top_cand.is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
// Deprecated: cannot use std::set, because its element is constant.
//inline void Searching::search_in_sequential(
// const idi query_id,
// const idi K,
// const idi L,
//// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K) const
//{
// std::set<Candidate> set_L;
// boost::dynamic_bitset<> is_visited(num_v_);
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// is_visited[init_ids[v_i]] = true;
// }
// const dataf *query_data = queries_load_ + query_id * dimension_;
//
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
//// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// set_L.emplace(v_id, dist, false);
// }
//// std::sort(set_L.begin(), set_L.begin() + L);
// idi k = 0; // Index of every queue's first unchecked candidate.
// while (k < L) {
//// Candidate &top_cand = set_L[k];
// std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k);
// unsigned nk = L;
// if (!top_cand->is_checked_) {
// top_cand->is_checked_ = true;
// idi v_id = top_cand.id_; // Vertex ID.
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// // Traverse v_id's all neighbors, pushing them into the queue
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
// // Compute the distance
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
// Candidate cand(nb_id, dist, false);
// // Insert into the queue
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// if (nk <= k) {
// k = nk;
// } else {
// ++k;
// }
// }
//
// for (size_t k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
//}
/* Function:
 * Merge sorted queue2 into sorted queue1 in place; queue1_size is fixed, so
 * every insertion pushes queue1's worst element off the end.
 * @param[in,out] queue1  destination queue (sub-range starting at queue1_start).
 * @param queue1_start    offset of queue1 inside its backing vector.
 * @param queue1_size     fixed element count of queue1.
 * @param[in,out] queue2  source queue (sub-range starting at queue2_start).
 * @param queue2_start    offset of queue2 inside its backing vector.
 * @param queue2_size     element count of queue2.
 * @return the lowest index (relative to queue1_start) that was modified, or
 *         queue1_size if nothing from queue2 made it in.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    // queue2's head is its best element, so it determines the lowest index
    // in queue1 that can possibly change.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Everything in queue2 is worse than queue1's worst: no change.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot can change; overwrite it directly.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }

//    auto *queue1_base = queue1.data() + queue1_start;
    // Insert the 1st of queue2
    insert_one_element_at(
            queue2[queue2_start],
//            queue1.data(),
            queue1,
            insert_index,
            queue1_start,
            queue1_size);
    if (queue2_size == 1) {
        return insert_index;
    }
//    memmove(reinterpret_cast<char *>(queue1_base + insert_index + 1),
//            reinterpret_cast<char *>(queue1_base + insert_index),
//            (queue1_size - insert_index) * sizeof(Candidate));
//    queue1[insert_index] = queue2[queue2_start];

    // Insert: two-pointer walk; whenever queue2's current element orders
    // before queue1's, splice it in (dropping queue1's last element).
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
//    for (idi insert_i = insert_index + 1; insert_i < q_i_1_bound; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished traverse. Rest of queue1 is already in place.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
//                    queue1.data(),
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            // The inserted element now occupies q_i_1; step past it.
            ++q_i_1;
        }
    }
//
//    // Merge queue1 and queue2 into tmp_queue.
//    std::vector<Candidate> tmp_queue(queue1_size + queue2_size);
//    std::merge(queue1.begin() + queue1_start,
//               queue1.begin() + queue1_start + queue1_size,
//               queue2.begin() + queue2_start,
//               queue2.begin() + queue2_start + queue2_size,
//               tmp_queue.begin());
//    // Resize tmp_queue.
//    tmp_queue.resize(limit_size);
//
//    // Swap queue1 and tmp_queue
//    queue1.swap(tmp_queue);
    return insert_index;
}
/* Function:
 * Merge sorted queue2 into sorted queue1 in place; unlike the _fixed
 * variant, queue1_size grows (up to queue1_length) as elements are added.
 * queue1_size should be updated.
 * queue1_length should be provided.
 * @param[in,out] queue1      destination queue (sub-range from queue1_start).
 * @param queue1_start        offset of queue1 inside its backing vector.
 * @param[in,out] queue1_size element count of queue1; updated by the merge.
 * @param queue1_length       maximum capacity of queue1.
 * @param[in,out] queue2      source queue (sub-range from queue2_start).
 * @param queue2_start        offset of queue2 inside its backing vector.
 * @param queue2_size         element count of queue2.
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    // queue2's head (its best element) bounds where changes can begin.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
//    if (insert_index == queue1_size) {
//        return insert_index;
//    } else if (insert_index == queue1_size - 1) {
//        queue1[queue1_start + insert_index] = queue2[queue2_start];
//        return insert_index;
//    }
////    auto *queue1_base = queue1.data() + queue1_start;
//    // Insert the 1st of queue2
//    insert_one_element_at(
//            queue2[queue2_start],
////            queue1.data(),
//            queue1,
//            insert_index,
//            queue1_start,
//            queue1_size);
    // Insert queue2's head; this may grow queue1_size by one.
    add_into_queue_at(
            queue2[queue2_start],
            queue1,
            insert_index,
            queue1_start,
            queue1_size,
            queue1_length);
    if (queue2_size == 1) {
        return;
    }
//    memmove(reinterpret_cast<char *>(queue1_base + insert_index + 1),
//            reinterpret_cast<char *>(queue1_base + insert_index),
//            (queue1_size - insert_index) * sizeof(Candidate));
//    queue1[insert_index] = queue2[queue2_start];

    // Insert: two-pointer walk over the remainder of both queues.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
//    const idi q_i_1_bound = queue1_start + queue1_size;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
    idi insert_i;
    for (insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
//    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 is exhausted: bulk-append queue2's remainder until
            // either capacity or queue2 runs out.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            while (insert_i < queue1_length && q_i_2 < q_i_2_bound) {
                queue1[queue1_start + insert_i++] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 is exhausted: remaining queue1 elements stay in place.
            break;
        }
//        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
//            // queue1 or queue2 finished traverse. Rest o
//            break;
//        }
        else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else {
//            if (queue1_size < queue1_length) {
//                ++q_i_1_bound; // also needs to update q_i_1_bound
//            }
            // Splice queue2's current element in; queue1_size may grow.
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
//            // Insert queue2[q_i_2] into queue1
//            insert_one_element_at(
//                    queue2[q_i_2++],
////                    queue1.data(),
//                    queue1,
//                    insert_i,
//                    queue1_start,
//                    queue1_size);
            ++q_i_1;
            // queue1_size may have changed: refresh the traversal bound.
            q_i_1_bound = queue1_start + queue1_size;
        }
    }
//    queue1_size = insert_i;
//
//    // Merge queue1 and queue2 into tmp_queue.
//    std::vector<Candidate> tmp_queue(queue1_size + queue2_size);
//    std::merge(queue1.begin() + queue1_start,
//               queue1.begin() + queue1_start + queue1_size,
//               queue2.begin() + queue2_start,
//               queue2.begin() + queue2_start + queue2_size,
//               tmp_queue.begin());
//    // Resize tmp_queue.
//    tmp_queue.resize(limit_size);
//
//    // Swap queue1 and tmp_queue
//    queue1.swap(tmp_queue);
//    return insert_index;
}
/* Merge all per-thread local queues into set_L.
 * Phase 1: parallel binary-tree reduction over the largest power-of-two
 * prefix of the queues. Phase 2 (only when num_threads_ is not a power of
 * two): sequential prefix-style merge of the remaining queues into the last
 * one. Phase 3: merge the surviving queue into set_L.
 * The duplicated per-pair merge logic of phases 1 and 2 is factored into a
 * single local lambda (behavior unchanged from the original duplicated code).
 * @param[in,out] local_queues_list  per-thread sorted candidate queues.
 * @param[in,out] local_queues_ends  per-thread element counts; reset to 0 on return.
 * @param[in,out] set_L              global sorted queue of fixed size L.
 * @param L                          global queue length.
 * @return lowest index of set_L that was modified, or L if none was.
 */
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // Merge queue bi into queue ai: steal bi wholesale when ai is empty,
    // otherwise sorted-merge into a temporary capped/padded to length L.
    auto merge_b_into_a = [&local_queues_list, &local_queues_ends, L](idi ai, idi bi) {
        if (0 == local_queues_ends[bi]) {
            return; // Nothing to take from bi.
        }
        if (local_queues_ends[ai] == 0) {
            // ai is empty: just swap the buffers and counts.
            local_queues_list[ai].swap(local_queues_list[bi]);
            std::swap(local_queues_ends[ai], local_queues_ends[bi]);
            return;
        }
        idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
        std::vector<Candidate> tmp_queue(tmp_length);
        std::merge(
                local_queues_list[ai].begin(),
                local_queues_list[ai].begin() + local_queues_ends[ai],
                local_queues_list[bi].begin(),
                local_queues_list[bi].begin() + local_queues_ends[bi],
                tmp_queue.begin());
        if (tmp_length > L) {
            // Keep only the best L candidates.
            tmp_queue.resize(L);
            tmp_length = L;
        } else if (tmp_length < L) {
            // Pad the buffer back to capacity L so later swaps stay uniform.
            tmp_queue.resize(L);
        }
        local_queues_list[ai].swap(tmp_queue);
        local_queues_ends[ai] = tmp_length;
    };

    // Phase 1: tree reduction over the power-of-two prefix of the queues.
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            merge_b_into_a(ai, bi);
        }
    }
    // Phase 2: remain, prefix-sum-like merge for the non-power-of-two tail.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            merge_b_into_a(i, i - 1);
        }
    }

    // Phase 3: merge the surviving queue into set_L.
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends for the next merge round.
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);

    return r;
}
/* Function:
 * Use large local_queues_array as a concatenation of all queues.
 * Queue t occupies local_queues_array[t * local_queue_length,
 * t * local_queue_length + local_queues_ends[t]).
 * Phase 1: parallel tournament (pairwise-tree) merge over the largest
 * power-of-two prefix of the queues.
 * Phase 2: the remaining queues (when num_threads_ is not a power of two)
 * are folded in one by one, prefix-sum style.
 * Phase 3: the last queue is merged into the global top-L queue set_L.
 * Returns the lowest position in set_L where a candidate was inserted,
 * or L if nothing was inserted.
 * Side effect: resets every entry of local_queues_ends to 0.
 */
inline idi Searching::merge_all_queues_para_array(
//        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // size: largest power of two <= num_threads_; only these first `size`
    // queues take part in the parallel tree merge below.
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        // Stride between merge destinations at tree depth d.
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1: destination queue index
            idi a_start = ai * local_queue_length;
            idi bi = i + (1 << d) - 1; // i + 2^d - 1: source queue index
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                // Source queue is empty; nothing to merge.
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // Destination queue is empty: a plain copy replaces the merge.
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(local_queues_array.begin() + b_start,
                        local_queues_array.begin() + b_start + local_queues_ends[bi],
                        local_queues_array.begin() + a_start); // Copy bi to ai
//                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            // Both queues non-empty: sorted in-place merge of queue bi into
            // queue ai, bounded by local_queue_length (updates
            // local_queues_ends[ai]; semantics per the callee).
            merge_two_queues_into_1st_queue_seq_incr(
                    local_queues_array,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    local_queues_array,
                    b_start,
                    local_queues_ends[bi]);
//            {
//                idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
//                std::vector<Candidate> tmp_queue(tmp_length);
//                std::merge(
//                        local_queues_array.begin() + a_start,
//                        local_queues_array.begin() + a_start + local_queues_ends[ai],
//                        local_queues_array.begin() + b_start,
//                        local_queues_array.begin() + b_start + local_queues_ends[bi],
//                        tmp_queue.begin());
//                if (tmp_length > L) {
//                    tmp_queue.resize(L);
//                    tmp_length = L;
//                } else if (tmp_length < L) {
//                    tmp_queue.resize(L);
//                }
////                local_queues_list[ai].swap(tmp_queue);
//                std::copy(tmp_queue.begin(), tmp_queue.end(), local_queues_array.begin() + a_start);
//                local_queues_ends[ai] = tmp_length;
//            }
//            {// Print queue a
//                printf("d: %u "
//                       "i: %u "
//                       "ai: %u "
//                       "local_queues_ends[%d]: %d\n",
//                       d,
//                       i,
//                       ai,
//                       ai,
//                       local_queues_ends[ai]);
//                for (idi i_q = a_start; i_q < a_start + local_queues_ends[ai]; ++i_q) {
//                    printf("[%u]: "
//                           "id: %u "
//                           "dist: %f\n",
//                           i_q - a_start,
//                           local_queues_array[i_q].id_,
//                           local_queues_array[i_q].distance_);
//                }
//            }
        }
    }
    // Remain, prefix-sum-like merge: sequentially fold each leftover queue
    // into the next one, so queue (num_threads_ - 1) accumulates everything.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // Destination empty: copy instead of merging.
//                local_queues_list[ai].swap(local_queues_list[bi]);
                std::copy(local_queues_array.begin() + b_start,
                        local_queues_array.begin() + b_start + local_queues_ends[bi],
                        local_queues_array.begin() + a_start); // Copy bi to ai
//                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            merge_two_queues_into_1st_queue_seq_incr(
                    local_queues_array,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    local_queues_array,
                    b_start,
                    local_queues_ends[bi]);
//            {
//                idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
//                std::vector<Candidate> tmp_queue(tmp_length);
//                std::merge(
//                        local_queues_array.begin() + a_start,
//                        local_queues_array.begin() + a_start + local_queues_ends[ai],
//                        local_queues_array.begin() + b_start,
//                        local_queues_array.begin() + b_start + local_queues_ends[bi],
//                        tmp_queue.begin());
//                if (tmp_length > L) {
//                    tmp_queue.resize(L);
//                    tmp_length = L;
//                } else if (tmp_length < L) {
//                    tmp_queue.resize(L);
//                }
////                local_queues_list[ai].swap(tmp_queue);
//                std::copy(tmp_queue.begin(), tmp_queue.end(), local_queues_array.begin() + a_start);
//                local_queues_ends[ai] = tmp_length;
//            }
        }
    }
    // Merge into set_L. r == L signals "nothing inserted".
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
//                local_queues_list[num_threads_ - 1],
                local_queues_array,
//                0,
                (num_threads_ - 1) * local_queue_length,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends so the queues are empty for the next round.
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}
// Sequential top-M best-first graph search for one query.
// Seeds set_L with the L vertices in init_ids, then repeatedly expands up to
// M unchecked candidates per iteration, inserting any closer neighbors back
// into the sorted queue, until no unchecked candidate remains below index L.
// The ids of the best K candidates are written to set_K.
// Side effect: increments count_distance_computation_ per distance evaluated.
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    // Mark every seed vertex as visited up front.
    boost::dynamic_bitset<> visited(num_v_);
    for (idi s_i = 0; s_i < L; ++s_i) {
        visited[init_ids[s_i]] = true;
    }

    const dataf *query_vec = queries_load_ + query_id * dimension_;

    // Warm the cache for the seed vertices' data.
    for (idi s_i = 0; s_i < L; ++s_i) {
        _mm_prefetch(opt_nsg_graph_ + init_ids[s_i] * vertex_bytes_, _MM_HINT_T0);
    }

    // Compute seed distances and fill set_L.
    for (unsigned s_i = 0; s_i < L; s_i++) {
        unsigned seed_id = init_ids[s_i];
        auto *seed_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + seed_id * vertex_bytes_);
        dataf seed_norm = *seed_data++;  // Vertex layout: norm first, then the vector.
        ++count_distance_computation_;
        distf seed_dist = compute_distance_with_norm(seed_data, query_vec, seed_norm);
        set_L[s_i] = Candidate(seed_id, seed_dist, false); // false: not yet checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> selected(M);   // ids of this iteration's top-M picks
    idi selected_end = 0;
    idi first_unchecked = 0;        // queue index of the first unchecked candidate
    idi iter_count = 0;             // for debug
    while (first_unchecked < L) {
        ++iter_count;
        unsigned lowest_insert = L; // lowest queue position touched this iteration
        idi last_selected = L;      // queue position of the last pick

        // Pick up to M unchecked candidates, marking each as checked.
        for (idi q_i = first_unchecked; q_i < L && selected_end < M; ++q_i) {
            if (set_L[q_i].is_checked_) {
                continue;
            }
            last_selected = q_i;
            set_L[q_i].is_checked_ = true;
            selected[selected_end++] = set_L[q_i].id_;
        }

        // Expand each pick: walk its out-edges and try to enqueue neighbors.
        for (idi p_i = 0; p_i < selected_end; ++p_i) {
            idi pick_id = selected[p_i];
            _mm_prefetch(opt_nsg_graph_ + pick_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *edges = (idi *) (opt_nsg_graph_ + pick_id * vertex_bytes_ + data_bytes_);
            idi degree = *edges++;  // Edge list is prefixed by its length.
            for (idi pf_i = 0; pf_i < degree; ++pf_i) {
                _mm_prefetch(opt_nsg_graph_ + edges[pf_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < degree; ++e_i) {
                idi ngb_id = edges[e_i];
                if (visited[ngb_id]) {
                    continue;
                }
                visited[ngb_id] = true;
                auto *ngb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + ngb_id * vertex_bytes_);
                dataf ngb_norm = *ngb_data++;
                ++count_distance_computation_;
                distf ngb_dist = compute_distance_with_norm(ngb_data, query_vec, ngb_norm);
                if (ngb_dist > set_L[L - 1].distance_) {
                    continue;  // Worse than the current worst in the queue.
                }
                Candidate ngb_cand(ngb_id, ngb_dist, false);
                idi pos = insert_into_queue(set_L, L, ngb_cand);
                if (pos < lowest_insert) {
                    lowest_insert = pos;
                }
            }
        }
        selected_end = 0; // Clear this iteration's picks.

        // Resume from the highest new insertion if it precedes the last pick,
        // otherwise continue just past the last pick.
        first_unchecked = (lowest_insert <= last_selected) ? lowest_insert
                                                           : last_selected + 1;
    }

    // Emit the ids of the K nearest candidates.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
//void Searching::search_with_top_m(
// Instrumented variant of search_with_top_m used only to measure the range of
// distances held in set_L during a search. It runs the same top-M best-first
// traversal but, after every iteration, folds every distance in set_L into the
// running member range [dist_min_, dist_max_]. No results are emitted.
// Side effects: overwrites the member variables dist_max_ and dist_min_.
inline void Searching::search_with_top_m_to_get_distance_range(
        const PANNS::idi M,
        const PANNS::idi query_id,
//        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids)
//        std::vector<idi> &set_K)
{
    // Start with an empty (inverted) range.
    dist_max_ = -FLT_MAX;
    dist_min_ = FLT_MAX;
    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark all seed candidates as visited.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for the seed vertices.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex layout: a leading norm value followed by the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
//        {// For distance range
//            if (dist > dist_max_) {
//                dist_max_ = dist;
//            }
//            if (dist < dist_min_) {
//                dist_min_ = dist;
//            }
//        }
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Lowest queue position that received an insertion.
        // Select M candidates
        idi last_k = L; // Queue position of the last selected candidate.
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Edge list is prefixed by its length.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current worst in set_L.
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
//                {// For distance range
//                    if (dist > dist_max_) {
//                        dist_max_ = dist;
//                    }
//                    if (dist < dist_min_) {
//                        dist_min_ = dist;
//                    }
//                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        {// For histogram
            // After each iteration, widen [dist_min_, dist_max_] with every
            // distance currently held in set_L.
            for (idi i_l = 0; i_l < L; ++i_l) {
                distf dist = set_L[i_l].distance_;
                {// For distance range
                    if (dist > dist_max_) {
                        dist_max_ = dist;
                    }
                    if (dist < dist_min_) {
                        dist_min_ = dist;
                    }
                }
            }
        }
    }
//    for (idi k_i = 0; k_i < K; ++k_i) {
//        set_K[k_i] = set_L[k_i].id_;
//    }
}
//void Searching::search_with_top_m(
// Diagnostic variant of search_with_top_m that studies the effect of M:
// each iteration it prints the number of neighbors scanned vs. the number
// actually inserted into set_L, and tracks (in range_count) which third of
// the queue the insertions landed in.
// NOTE(review): hard-exits after query id 3 — deliberate debugging cut-off;
// do not use in production runs.
inline void Searching::search_with_top_m_myths_M(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
    const idi loc_range = L / 3; // Bucket width for the queue-position histogram.
    boost::dynamic_bitset<> is_visited(num_v_);
    {
        // Mark all seed candidates as visited.
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for the seed vertices.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex layout: a leading norm value followed by the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
//    {// For histogram
//        const distf dist_range = dist_max_ - dist_min_;
//        printf("iter:%u\n", 0);
//        for (idi i_l = 0; i_l < L; ++i_l) {
//            printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
//        }
//    }
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        std::vector<idi> range_count(3, 0); // Insertions per queue third.
        idi zero_inserted_count = 0;        // Expanded candidates that inserted nothing.
//        {//test
//            printf("tmp_count: %u\n", tmp_count);
//        }
        ++tmp_count;
        unsigned nk = L; // Lowest queue position that received an insertion.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
//        {//test
//            printf("top_m_candidates_ends: %u\n", top_m_candidates_end);
//        }
        {
            if (0 == top_m_candidates_end) {
                break; // No unchecked candidates remain.
            }
        }
        uint64_t count_neighbors = 0; // Total out-edges scanned this iteration.
        uint64_t count_inserted = 0;  // Total successful queue insertions.
        std::vector<idi> locs_to_count(M);
        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Edge list is prefixed by its length.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            count_neighbors += out_degree;
            idi num_inserted = 0; // Insertions contributed by this candidate.
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current worst in set_L.
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                ++num_inserted;
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
//                {
//                    printf("c_i: %u "
//                           "count: %u "
//                           "loc_inserted: %u\n",
//                           c_i,
//                           num_inserted,
//                           r);
//                }
                if (r < nk) {
                    nk = r;
                }
                {
                    // Histogram: which third of the queue the insert landed in.
                    ++range_count[r / loc_range];
                }
            }
            {
                if (0 == num_inserted) {
                    ++zero_inserted_count;
                }
                locs_to_count[c_i] = num_inserted;
                count_inserted += num_inserted;
            }
//            {
//                printf("c_i: %u "
//                       "num_inserted: %u\n",
//                       c_i,
//                       num_inserted);
//            }
        }
//        {
//            for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) {
//                locs_to_count[c_i] = 0;
//            }
//            printf("iter:%u\n", tmp_count);
//            for (idi c_i = 0; c_i < M; ++c_i) {
//                printf("%u %u\n", c_i, locs_to_count[c_i]);
//            }
//        }
//        {//test
//            idi sum = 0;
//            for (const idi ct : range_count) sum += ct;
//            printf("tmp_count: %u "
//                   "k: %u "
//                   "actual_M: %u %.1f%% "
//                   "zero_ins: %u %.1f%% "
//                   "1/3: %u %.1f%% "
//                   "2/3: %u %.1f%% "
//                   "3/3: %u %.1f%%\n",
//                   tmp_count,
//                   k,
//                   top_m_candidates_end, 100.0 * top_m_candidates_end / M,
//                   zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end,
//                   range_count[0], 100.0 * range_count[0] / sum,
//                   range_count[1], 100.0 * range_count[1] / sum,
//                   range_count[2], 100.0 * range_count[2] / sum);
//        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        {
            // Fixed: the original format string "query:%uiter: %u" fused the
            // query id and the iteration label into one token.
            printf("query:%u iter: %u "
                   "#neighbors: %lu "
                   "#inserted: %lu "
                   "ratio: %.2f%%\n",
                   query_id, tmp_count,
                   count_neighbors,
                   count_inserted,
                   100.0 * count_inserted / count_neighbors);
        }
//        {// For histogram
////            const auto it_min = std::min_element(set_L.begin(), set_L.end());
////            const auto it_max = std::max_element(set_L.begin(), set_L.end());
////            const distf dist_min = it_min->distance_;
////            const distf dist_max = it_max->distance_;
////            const distf dist_min = it_min->distance_ - 1.0;
////            const distf dist_max = it_max->distance_ + 1.0;
//            const distf dist_range = dist_max_ - dist_min_;
////            const distf dist_range = dist_max - dist_min;
////            {
////                printf("it_min->distance_: %f dist_min: %f\n",
////                       it_min->distance_, dist_min);
////            }
////            const distf dist_range = it_max->distance_ - it_min->distance_;
//            printf("iter:%u\n", tmp_count);
//            for (idi i_l = 0; i_l < L; ++i_l) {
////                printf("%f\n", set_L[i_l].distance_);
////                printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0);
//                printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0);
////                printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0);
//            }
//        }
    }
    // Emit the ids of the K nearest candidates.
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    // Debugging cut-off: stop the whole process after query 3.
    if (query_id == 3) {
        exit(1);
    }
}
// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
//void Searching::search_with_top_m(
// Profiling variant of search_with_top_m: visited vertices are tracked by a
// custom BitVector using atomic bit operations, and the initialization loops
// plus the neighbor-expansion loop are parallelized with OpenMP.
// NOTE(review): inside the parallel expansion loop, `nk` and `set_L` are
// updated by multiple threads without synchronization (insert_into_queue is
// presumably not thread-safe) — racy by design; suitable only for profiling
// experiments. TODO confirm against insert_into_queue's definition.
inline void Searching::search_with_top_m_profile_bit_CAS(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//    boost::dynamic_bitset<> is_visited(num_v_); // Bit array
    BitVector is_visited(num_v_); // Project-defined bit array with atomic ops.
    {
        // Mark all seed candidates as visited (atomically, threads may overlap).
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
//            is_visited[init_ids[c_i]] = true;
            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for the seed vertices.
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex layout: a leading norm value followed by the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Lowest queue position that received an insertion.
        // Select M candidates (sequential).
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue (parallel; see NOTE above).
#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Edge list is prefixed by its length.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = true;
//                if (!AtomicOps::CAS(is_visited.data() + nb_id,
//                                    static_cast<uint8_t>(0),
//                                    static_cast<uint8_t>(1))) {
//                    continue;
//                }
                {// Self-defined BitVector
                    // Test-then-set; not a single atomic test-and-set, so two
                    // threads may both pass the check for the same vertex.
                    if (is_visited.atomic_is_bit_set(nb_id)) {
                        continue;
                    }
                    is_visited.atomic_set_bit(nb_id);
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current worst in set_L.
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
    // Emit the ids of the K nearest candidates.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//
//    {//test
//        for (idi k_i = 0; k_i < K; ++k_i) {
//            printf("%u: %u: %u %f\n",
//                   query_id,
//                   k_i, set_L[k_i].id_, set_L[k_i].distance_);
//        }
//        exit(1);
//    }
}
// Profiling experiment: top-M search that additionally prunes neighbors using
// per-thread-chunk distance thresholds. Each iteration the selected top-M
// candidates are split into num_threads_ chunks; local_thresholds[t-1] holds
// the largest selected-candidate distance seen for chunk t, and neighbors
// worse than their chunk's threshold are skipped. Prints the thresholds each
// iteration and hard-exits after the first query.
// NOTE(review): intended to be parallel (the pragmas are commented out) but
// currently runs sequentially; experimental code, not for production.
inline void Searching::search_with_top_m_profile_prune_neighbors(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
    boost::dynamic_bitset<> is_visited(num_v_); // Bit array
//    BitVector is_visited(num_v_);
    {
        // Mark all seed candidates as visited.
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
//            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache for the seed vertices.
//#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex layout: a leading norm value followed by the data vector.
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    // Full Candidate objects are kept (not just ids) so distances are
    // available for the pruning thresholds.
    std::vector<Candidate> top_m_candidates(M);
//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        // Per-chunk pruning thresholds; -FLT_MAX means "no threshold yet".
        std::vector<distf> local_thresholds(num_threads_, -FLT_MAX);
        unsigned nk = L; // Lowest queue position that received an insertion.
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            {
                // Round-robin assignment of thresholds: the candidate landing
                // in slot (end % num_threads_) raises the threshold of the
                // previous chunk.
                idi tid = top_m_candidates_end % num_threads_;
                if (tid != 0) {
                    if (local_thresholds[tid - 1] < set_L[c_i].distance_) {
                        local_thresholds[tid - 1] = set_L[c_i].distance_;
                    }
                }
            }
            top_m_candidates[top_m_candidates_end++] = set_L[c_i];
//            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        {//test
            // Print the per-chunk thresholds for this iteration; chunk 0 falls
            // back to the queue's worst distance.
            printf("iter: %u ", tmp_count);
            for (int tid = 0; tid < num_threads_; ++tid) {
                if (tid == 0) {
                    printf(" [%d]:%f",
                           tid, set_L[L - 1].distance_);
                } else {
                    printf(" [%d]:%f",
                           tid, local_thresholds[tid - 1]);
                }
            }
            printf("\n");
        }
        // Chunk size used to map a candidate index to its (virtual) thread id.
        int thread_chunk_size = (top_m_candidates_end + num_threads_ - 1) / num_threads_;
        // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            int tid = c_i / thread_chunk_size; // Virtual thread id of this candidate.
            idi cand_id = top_m_candidates[c_i].id_;
//            idi cand_id = top_m_candidates[c_i];
            distf cand_dist = top_m_candidates[c_i].distance_;
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Edge list is prefixed by its length.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
//                if (!AtomicOps::CAS(is_visited.data() + nb_id,
//                                    static_cast<uint8_t>(0),
//                                    static_cast<uint8_t>(1))) {
//                    continue;
//                }
//                {// Self-defined BitVector
//                    if (is_visited.atomic_is_bit_set(nb_id)) {
//                        continue;
//                    }
//                    is_visited.atomic_set_bit(nb_id);
//                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current worst in set_L.
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
//                {// Father
//                    if (0 != c_i
//                        && dist > cand_dist) {
//                        is_visited[nb_id] = false;
//                        continue;
//                    }
//                }
//                {
//                    if (0 != c_i
//                        && dist > set_L[L/2].distance_) {
//                        continue;
//                    }
//                }
                {
                    // Prune: skip neighbors worse than this chunk's threshold.
                    if (0 != tid
                        && -FLT_MAX != local_thresholds[tid - 1]
                        && dist > local_thresholds[tid - 1]) {
                        continue;
                    }
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
    // Emit the ids of the K nearest candidates.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    {//test
        // Debugging cut-off: stop the whole process after the first query.
        exit(1);
    }
}
///// Backup
//inline void Searching::search_with_top_m(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K)
//// std::vector< std::vector<idi> > &top_m_list)
//{
// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
//
////// DEPRECATED: the is_visited array cannot be shared among threads.
//inline void Searching::search_with_top_m_no_local_arrays(
// const PANNS::idi M,
// const PANNS::idi query_id,
// const PANNS::idi K,
// const PANNS::idi L,
// std::vector<Candidate> &set_L,
// const std::vector<idi> &init_ids,
// std::vector<idi> &set_K,
// boost::dynamic_bitset<> &is_visited)
//// std::vector< std::vector<idi> > &top_m_list)
//{
//// boost::dynamic_bitset<> is_visited(num_v_);
//
// {
// for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = true;
// }
// }
//
// const dataf *query_data = queries_load_ + query_id * dimension_;
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// // Get the distances of all candidates, store in the set set_L.
// for (unsigned i = 0; i < L; i++) {
// unsigned v_id = init_ids[i];
// auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
// dataf norm = *v_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(v_data, query_data, norm);
// set_L[i] = Candidate(v_id, dist, false); // False means not checked.
// }
// std::sort(set_L.begin(), set_L.begin() + L);
//
// std::vector<idi> top_m_candidates(M);
// idi top_m_candidates_end = 0;
// idi k = 0; // Index of first unchecked candidate.
// idi tmp_count = 0; // for debug
// while (k < L) {
// ++tmp_count;
//
// unsigned nk = L;
//
// // Select M candidates
// idi last_k = L;
// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
// if (set_L[c_i].is_checked_) {
// continue;
// }
// last_k = c_i; // Record the location of the last candidate selected.
// set_L[c_i].is_checked_ = true;
// top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
// }
//
// // Push M candidates' neighbors into the queue.
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
// idi cand_id = top_m_candidates[c_i];
// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// for (idi n_i = 0; n_i < out_degree; ++n_i) {
// _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
// }
// for (idi e_i = 0; e_i < out_degree; ++e_i) {
// idi nb_id = out_edges[e_i];
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = true;
// auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
// dataf norm = *nb_data++;
//// ++count_distance_computation;
// distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > set_L[L-1].distance_) {
// continue;
// }
//// if (dist >= set_L[L-1].distance_) {
//// continue;
//// }
// Candidate cand(nb_id, dist, false);
// idi r = insert_into_queue(set_L, L, cand);
// if (r < nk) {
// nk = r;
// }
// }
// }
// top_m_candidates_end = 0; // Clear top_m_candidates
//
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
// }
//
// for (idi k_i = 0; k_i < K; ++k_i) {
// set_K[k_i] = set_L[k_i].id_;
// }
////
//// {//test
//// for (idi k_i = 0; k_i < K; ++k_i) {
//// printf("%u: %u: %u %f\n",
//// query_id,
//// k_i, set_L[k_i].id_, set_L[k_i].distance_);
//// }
//// exit(1);
//// }
//}
inline void Searching::search_with_top_m_in_batch(
const PANNS::idi M,
const PANNS::idi batch_start,
const PANNS::idi batch_size,
const PANNS::idi K,
const PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list)
{
std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));
// Prepare the init_ids
{
//#pragma omp parallel for
for (idi q_i = 0; q_i < batch_size; ++q_i) {
auto &is_visited = is_visited_list[q_i];
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = true;
}
}
}
// Initialize set_L_list
{
//#pragma omp parallel for
for (idi q_i = 0; q_i < batch_size; ++q_i) {
const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
for (idi i = 0; i < L; i++) {
idi v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
// ++count_distance_computation_;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
}
std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
}
}
{
std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
idi joint_queue_end = 0;
boost::dynamic_bitset<> is_in_joint_queue(num_v_);
// std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
// std::vector<idi> cands_query_ids_ends(num_v_, 0);
std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
std::vector<idi> queries_not_finished(batch_size);
idi queries_not_finished_end = batch_size;
for (idi q_i = 0; q_i < batch_size; ++q_i) {
queries_not_finished[q_i] = q_i;
}
bool is_finished = false;
idi counter_for_debug = 0;
while (!is_finished) {
++counter_for_debug;
// Build the new joint queue
// Traverse every query's queue
for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
idi q_local_id = queries_not_finished[q_i];
// last_ks[q_local_id] = L;
auto &set_L = set_L_list[q_local_id];
idi top_m_count = 0;
for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
if (set_L[c_i].is_checked_) {
continue;
}
set_L[c_i].is_checked_ = true;
last_ks[q_local_id] = c_i;
++top_m_count;
idi cand_id = set_L[c_i].id_;
// Record which query selected cand_id
auto tmp_c = cands_query_ids.find(cand_id);
if (tmp_c != cands_query_ids.end()) {
tmp_c->second.push_back(q_local_id);
} else {
cands_query_ids.emplace(cand_id, std::vector<idi>());
cands_query_ids[cand_id].reserve(batch_size);
cands_query_ids[cand_id].push_back(q_local_id);
}
// cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
// Add candidate cand_id into the joint queue
if (is_in_joint_queue[cand_id]) {
continue;
}
is_in_joint_queue[cand_id] = true;
joint_queue[joint_queue_end++] = cand_id;
}
}
queries_not_finished_end = 0; // Clear queries_not_finished
// Traverse every shared candidate
for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
idi cand_id = joint_queue[c_i];
is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
const auto &query_local_ids = cands_query_ids[cand_id];
// Push neighbors to every queue of the queries that selected cand_id.
// Traverse cand_id's neighbors
// idi &q_i_bound = cands_query_ids_ends[cand_id];
// for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
// idi q_local_id = query_local_ids[q_i];
for (idi q_local_id : query_local_ids) {
dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
auto &is_visited = is_visited_list[q_local_id];
auto &set_L = set_L_list[q_local_id];
// // Traverse cand_id's neighbors
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = true;
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation_;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (dist > set_L[L-1].distance_) {
continue;
}
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
Candidate new_cand(nb_id, dist, false);
idi insert_loc = insert_into_queue(set_L, L, new_cand);
if (insert_loc < nks[q_local_id]) {
nks[q_local_id] = insert_loc;
}
}
}
cands_query_ids.erase(cand_id);
// q_i_bound = 0; // Clear cands_query_ids[cand_id]
}
joint_queue_end = 0; // Clear joint_queue
for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
if (nks[q_local_id] <= last_ks[q_local_id]) {
ks[q_local_id] = nks[q_local_id];
} else {
ks[q_local_id] = last_ks[q_local_id] + 1;
}
nks[q_local_id] = L;
last_ks[q_local_id] = L;
if (ks[q_local_id] < L) {
queries_not_finished[queries_not_finished_end++] = q_local_id;
}
}
if (!queries_not_finished_end) {
is_finished = true;
}
}
}
{
for (idi q_i = 0; q_i < batch_size; ++q_i) {
for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
}
}
}
////
// {//test
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// printf("query: %u\n", q_i + batch_start);
// for (idi c_i = 0; c_i < K; ++c_i) {
// printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
// }
// }
// }
}
/* Top-M best-first search (sequential edition; the OpenMP pragmas that
 * gave this function its "critical area" name are commented out, so the
 * CAS on the visited flags is only a harmless atomic test-and-set).
 *
 * Finds approximate nearest neighbors of query QUERY_ID using a sorted
 * candidate queue set_L of capacity L, expanding up to M unchecked
 * candidates per iteration.
 *
 * M         -- number of candidates expanded per iteration.
 * query_id  -- index of the query vector in queries_load_.
 * K         -- number of result ids written into set_K.
 * L         -- search-queue capacity (only the first L entries of set_L
 *              are maintained/sorted).
 * set_L     -- scratch candidate queue; must hold at least L entries.
 * init_ids  -- L seed vertex ids used to initialize the queue.
 * set_K     -- output; receives the ids of the best min(K, L) candidates.
 */
inline void Searching::para_search_with_top_m_critical_area(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    std::vector<uint8_t> is_visited(num_v_, 0);

    // Mark the seeds as visited so they are never re-inserted later.
    for (idi c_i = 0; c_i < L; ++c_i) {
        is_visited[init_ids[c_i]] = 1;
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Compute the distances of all seed candidates and fill set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: [norm (dataf)][vector data ...].
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Best (lowest) insertion index seen this round.
        // Select up to M unchecked candidates, scanning from index k.
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Expand the selected candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                // Atomically claim the neighbor; skip it if already visited.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue; // Worse than the current worst kept candidate.
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the best inserted position, or just past the last
        // selected candidate if no insertion improved on it.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the best ids. Guard with k_i < L as well: only the first L
    // entries of set_L are sorted/valid (consistent with the batch search
    // variant, which bounds its copy loop by c_i < K && c_i < L).
    for (idi k_i = 0; k_i < K && k_i < L; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
// Sequential twin of para_search_with_top_m_critical_area: the code is
// identical (the OpenMP pragmas are commented out in both editions), kept
// as a separate entry point, presumably for benchmarking comparisons.
// NOTE(review): this duplicates para_search_with_top_m_critical_area
// byte-for-byte; consider forwarding to it to avoid future divergence.
inline void Searching::para_search_with_top_m_critical_area_no_omp(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark all seed ids as visited so they are never re-inserted.
    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout: [norm (dataf)][vector data ...].
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Best (lowest) insertion index seen this round.
//        int nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomic test-and-set of the visited flag (redundant while
                // the surrounding loop is sequential, but harmless).
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the K best ids.
    // NOTE(review): assumes K <= L; entries past index L are not sorted.
//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
// "yes_omp" edition of the top-M critical-area search.
// NOTE(review): despite the name, only the is_visited initialization loop
// carries an active "#pragma omp parallel for"; the expansion loop and the
// insertion critical section are still commented out, so the main search
// runs sequentially. Otherwise identical to
// para_search_with_top_m_critical_area.
inline void Searching::para_search_with_top_m_critical_area_yes_omp(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark all seed ids as visited (in parallel).
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout: [norm (dataf)][vector data ...].
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Best (lowest) insertion index seen this round.
//        int nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomic test-and-set of the visited flag.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the K best ids.
    // NOTE(review): assumes K <= L; entries past index L are not sorted.
//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}
// Top-M search variant that takes the visited-flag array from the caller
// instead of allocating one per call (avoids a num_v_-sized allocation per
// query). The search logic is otherwise the same as
// para_search_with_top_m_critical_area.
// NOTE(review): is_visited is written but never cleared here — the caller
// presumably resets it between queries; verify at the call sites.
inline void Searching::para_search_with_top_m_visited_array(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<uint8_t> &is_visited)
//        std::vector< std::vector<idi> > &top_m_list)
{
//    uint64_t count_visited = 0;

//    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark all seed ids as visited so they are never re-inserted.
    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            ++count_visited;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout: [norm (dataf)][vector data ...].
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Best (lowest) insertion index seen this round.
//        int nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomic test-and-set of the visited flag.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
//                ++count_visited;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the K best ids.
    // NOTE(review): assumes K <= L; entries past index L are not sorted.
//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }

//    {
//        printf("query_id: %u "
//               "count_visited: %lu %f%%\n",
//               query_id,
//               count_visited,
//               100.0 * count_visited / num_v_);
//    }
}
// Top-M search, parallel edition with per-thread local queues.
// Neighbor expansion runs under an active "#pragma omp parallel for";
// each thread appends surviving candidates to its own local queue
// (avoiding a shared critical section), and the local queues are merged
// back into the global queue set_L after each round — in parallel via
// merge_all_queues_para_list() when num_threads_ > 1, otherwise with a
// single sequential merge.
inline void Searching::para_search_with_top_m_merge_queues(
        const idi M,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
    // Each thread's local queue has the same capacity as the global queue.
    const idi local_queue_length = L;
    std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
    std::vector<idi> local_queues_ends(num_threads_, 0);
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark all seed ids as visited (in parallel).
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout: [norm (dataf)][vector data ...].
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
//        {//test
//            printf("tmp_count: %d\n", tmp_count);
//        }

        // Select M candidates
        idi last_k = L;
        // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // Each thread writes only its own local queue; the visited flags
        // are claimed with an atomic CAS so a vertex is expanded once.
#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            int tid = omp_get_thread_num();
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // NOTE(review): set_L[L-1] is read concurrently while other
                // threads do not modify set_L in this phase (insertions go
                // to local queues), so this read is stable within a round.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Add to the local queue.
                add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        idi nk = L;
//        // Merge. Parallel merging in every two queues.
//        {
//            for (int tid = 0; tid < num_threads_; ++tid) {
//                if (0 == local_queues_ends[tid]) continue;
//                idi r = merge_two_queues_into_1st_queue_para(
//                        set_L,
//                        0,
//                        L,
//                        local_queues_list[tid],
//                        0,
//                        local_queues_ends[tid]);
////                idi r = merge_two_queues_into_1st_queue_seq(
////                        set_L,
////                        0,
////                        L,
////                        local_queues_list[tid],
////                        0,
////                        local_queues_ends[tid]);
//                local_queues_ends[tid] = 0; // Reset the local queue
//                if (r < nk) {
//                    nk = r;
//                }
//            }
//        }
//        {// text
//            if (query_id == 4 &&
//                    tmp_count == 5) {
//                // Print local queues
//                for (int t_i = 0; t_i < num_threads_; ++t_i) {
////                    idi start_i = t_i * local_queue_length;
//                    for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//                        printf("t[%u][%u]: "
//                               "id: %u "
//                               "dist: %f\n",
//                               t_i, q_i,
//                               local_queues_list[t_i][q_i].id_,
//                               local_queues_list[t_i][q_i].distance_);
//                    }
//                }
//                printf("----------\n");
//                for (idi i = 0; i < L; ++i) {
//                    printf("set_L[%u]: "
//                           "id: %u "
//                           "dist: %f\n",
//                           i,
//                           set_L[i].id_,
//                           set_L[i].distance_);
//                }
//                printf("----------\n");
//            }
//        }
        // Merge. Merge all queues in parallel.
        // merge_all_queues_para_list() also resets the local queue ends
        // (presumably — confirm against its definition); the single-thread
        // path resets local_queues_ends[0] explicitly below.
        {
            if (num_threads_ > 1) {
                idi r = merge_all_queues_para_list(
                        local_queues_list,
                        local_queues_ends,
                        set_L,
                        L);
                if (r < nk) {
                    nk = r;
                }
            } else {
                if (local_queues_ends[0]) {
                    idi r = merge_two_queues_into_1st_queue_seq_fixed(
                            set_L,
                            0,
                            L,
                            local_queues_list[0],
                            0,
                            local_queues_ends[0]);
                    local_queues_ends[0] = 0;
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
//        {//test
//            if (query_id == 4) {
//                for (idi i = 0; i < L; ++i) {
//                    printf("tmp_count: %u "
//                           "set_L[%u]: "
//                           "id: %u "
//                           "dist: %f\n",
//                           tmp_count,
//                           i,
//                           set_L[i].id_,
//                           set_L[i].distance_);
//                }
//            }
//
//        }
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the K best ids.
    // NOTE(review): assumes K <= L; entries past index L are not sorted.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }

//    {
//        exit(1);
//    }
//    {//test
//
////        if (query_id == 4) {
//        for (idi i = 0; i < L; ++i) {
//            printf("set_L[%u]: "
//                   "id: %u "
//                   "dist: %f\n",
//                   i,
//                   set_L[i].id_,
//                   set_L[i].distance_);
//        }
////            exit(1);
////        }
//    }
}
//// Using local queue and then sequential merge.
// Top-M search with per-thread local queues and a SEQUENTIAL merge:
// neighbor expansion is parallel (each thread appends to its own local
// queue, visited flags claimed by CAS), then the master thread merges
// every non-empty local queue into set_L one after another with
// merge_two_queues_into_1st_queue_seq_fixed().
inline void Searching::para_search_with_top_m_queues_seq_merge(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
//    const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
    // Each thread's local queue has the same capacity as the global queue.
    const idi local_queue_length = L;
    std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
    std::vector<idi> local_queues_ends(num_threads_, 0);
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark all seed ids as visited (in parallel).
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
//    for (idi v_i = 0; v_i < L; ++v_i) {
//        idi v_id = init_ids[v_i];
//        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//    }
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout: [norm (dataf)][vector data ...].
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
//        {
//            printf("tmp_count: %u "
//                   "k: %u\n",
//                   tmp_count,
//                   k);
//        }

//        unsigned nk = L;
//        int nk = L;
        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // Parallel: each thread fills its own local queue only.
#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            int tid = omp_get_thread_num();
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim the neighbor so it is expanded once.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                                    static_cast<uint8_t>(0),
                                    static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
//                idi r;
//#pragma omp critical
//                {
//                    r = insert_into_queue(set_L, L, cand);
//                    if (r < nk) {
//                        nk = r;
//                    }
//                }
                // Add to the local queue.
                add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        idi nk = L;
        // Merge (sequential): fold every non-empty local queue into set_L.
        {
            for (int tid = 0; tid < num_threads_; ++tid) {
                if (0 == local_queues_ends[tid]) continue;
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        0,
                        L,
                        local_queues_list[tid],
                        0,
                        local_queues_ends[tid]);
//                        L + 1);
                local_queues_ends[tid] = 0; // Reset the local queue
                if (r < nk) {
                    nk = r;
                }
            }
        }
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the K best ids.
    // NOTE(review): assumes K <= L; entries past index L are not sorted.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
//
//    {//test
//        for (idi k_i = 0; k_i < K; ++k_i) {
//            printf("%u: %u: %u %f\n",
//                    query_id,
//                    k_i, set_L[k_i].id_, set_L[k_i].distance_);
//        }
//        exit(1);
//    }
}
// Top-M search with per-thread local queues and a bitset visited filter
// updated WITHOUT atomics ("no_CAS"): inside the parallel expansion loop
// the is_visited bits are read and written plainly, so two threads may
// race on the same vertex. The worst case is a duplicate distance
// computation and duplicate queue insertions — a deliberate trade of
// strict deduplication for lower synchronization cost (note boost
// dynamic_bitset packs bits, so racing writes can even clobber neighbor
// bits in the same word; apparently accepted here).
// Scratch buffers (local_queues_list/ends, is_visited) are caller-owned
// and are reset at the end of this function for reuse.
inline void Searching::para_search_with_top_m_merge_queues_no_CAS(
        const idi M,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_length,
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
//        std::vector<uint8_t> &is_visited)
        boost::dynamic_bitset<> &is_visited)
{
////    const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
//    const idi local_queue_length = L;
//    std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
//    std::vector<idi> local_queues_ends(num_threads_, 0);
////    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark all seed ids as visited (in parallel).
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    // Vertex record layout: [norm (dataf)][vector data ...].
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        // Select M candidates
        idi last_k = L;
        // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // Each thread fills only its own local queue.
#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            int tid = omp_get_thread_num();
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency block: first word is the out-degree, then the ids.
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                // Plain (racy) check-then-set; see function header comment.
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = 1;
//                if (!AtomicOps::CAS(is_visited.data() + nb_id,
//                                    static_cast<uint8_t>(0),
//                                    static_cast<uint8_t>(1))) {
//                    continue;
//                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Add to the local queue.
                add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        idi nk = L;
//        // Merge. Parallel merging in every two queues.
//        {
//            for (int tid = 0; tid < num_threads_; ++tid) {
//                if (0 == local_queues_ends[tid]) continue;
//                idi r = merge_two_queues_into_1st_queue_para(
//                        set_L,
//                        0,
//                        L,
//                        local_queues_list[tid],
//                        0,
//                        local_queues_ends[tid]);
////                idi r = merge_two_queues_into_1st_queue_seq(
////                        set_L,
////                        0,
////                        L,
////                        local_queues_list[tid],
////                        0,
////                        local_queues_ends[tid]);
//                local_queues_ends[tid] = 0; // Reset the local queue
//                if (r < nk) {
//                    nk = r;
//                }
//            }
//        }
//        // Merge. Merge all queues in parallel.
//        {
//            if (num_threads_ > 1) {
//                idi r = merge_all_queues_para(
//                        local_queues_list,
//                        local_queues_ends,
//                        set_L,
//                        L);
//                if (r < nk) {
//                    nk = r;
//                }
//            } else {
//                if (local_queues_ends[0]) {
//                    idi r = merge_two_queues_into_1st_queue_seq(
//                            set_L,
//                            0,
//                            L,
//                            local_queues_list[0],
//                            0,
//                            local_queues_ends[0]);
//                    local_queues_ends[0] = 0;
//                    if (r < nk) {
//                        nk = r;
//                    }
//                }
//            }
//        }
        // Merge (sequential): fold every non-empty local queue into set_L.
        {
            for (int tid = 0; tid < num_threads_; ++tid) {
                if (0 == local_queues_ends[tid]) continue;
                idi r = merge_two_queues_into_1st_queue_seq_fixed(
                        set_L,
                        0,
                        L,
                        local_queues_list[tid],
                        0,
                        local_queues_ends[tid]);
//                        L + 1);
                local_queues_ends[tid] = 0; // Reset the local queue
                if (r < nk) {
                    nk = r;
                }
            }
        }
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    // Copy out the K best ids.
    // NOTE(review): assumes K <= L; entries past index L are not sorted.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }

    {// Reset caller-owned scratch state for the next query.
        is_visited.reset();
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    }
}
//inline void Searching::para_search_with_top_m_merge_queues_in_array(
/**
 * Parallel top-M best-first search over the NSG graph for one query.
 *
 * Every iteration selects up to M unchecked candidates from the global
 * queue set_L (kept sorted ascending by distance), expands their
 * neighbors in parallel — each OpenMP thread collects survivors into its
 * own slice of the flat array local_queues_array — and then merges all
 * local queues back into set_L. The loop ends when the first unchecked
 * candidate index k reaches L. The ids of the best K candidates are
 * written to set_K, and the scratch state (is_visited, local queue
 * sizes) is reset before returning.
 *
 * M                  candidates expanded per iteration.
 * query_id           index of the query vector in queries_load_.
 * K, L               result size and search width (queue capacity).
 * set_L              global candidate queue (ascending by distance).
 * init_ids           seed vertex ids used to fill set_L initially.
 * set_K              output: ids of the K best candidates.
 * local_queue_length capacity of each per-thread local queue.
 * local_queues_array flat storage: thread t owns
 *                    [t*local_queue_length, (t+1)*local_queue_length).
 * local_queues_ends  current length of each thread's local queue.
 * is_visited         bitmap of vertices whose distance was computed.
 */
inline void Searching::para_search_with_top_m_merge_queues_new_threshold(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends, // Sizes of local queue
// BitVector &is_visited)
// std::vector<uint8_t> &is_visited)
boost::dynamic_bitset<> &is_visited)
{
// Mark all seed vertices as visited up front.
{
#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
// is_visited.atomic_set_bit(init_ids[c_i]);
}
}
const dataf *query_data = queries_load_ + query_id * dimension_;
// Warm the cache for the seed vertices' data blocks.
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
uint64_t tmp_count_computation = 0;
// Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++; // first element of the vertex block is the precomputed norm
// ++count_distance_computation;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
std::sort(set_L.begin(), set_L.begin() + L);
// idi min_index = L - 1;
// distf min_1st = set_L[min_index].distance_;
std::vector<idi> top_m_candidates(M);
idi top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi tmp_count = 0; // for debug
while (k < L) {
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
if (set_L[c_i].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[c_i].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
}
// Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
int tid = omp_get_thread_num();
const idi local_queue_start = tid * local_queue_length;
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
// NOTE(review): these bitset reads/writes happen concurrently from
// multiple threads without atomics, and boost::dynamic_bitset packs
// bits into shared words. This looks like a tolerated benign-race
// "may revisit" filter (atomic variants are kept commented out
// below) — confirm that duplicate visits are acceptable here.
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
// { // __ATOMIC_SEQ_CST edition
// if (!AtomicOps::CAS(is_visited.data() + nb_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
// }
// {// Acquire and Release edition
// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) {
// continue;
// }
// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE);
// }
// {// Self-defined BitVector
// if (is_visited.atomic_is_bit_set(nb_id)) {
// continue;
// }
// is_visited.atomic_set_bit(nb_id);
// }
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation;
++tmp_count_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// if (dist > min_1st) {
// continue;
// } else if (min_index > 0) {
// // Inserted, so min_1st needs update
// if (dist > set_L[min_index - 1].distance_) {
// min_1st = dist;
// if (min_index < L - 1) {
// ++min_index;
// }
// } else {
// min_1st = set_L[--min_index].distance_;
// }
//// min_1st = set_L[--min_index].distance_;
// }
// Prune: worse than the current L-th best cannot enter set_L.
if (dist > set_L[L-1].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Add to the local queue.
add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
}
}
top_m_candidates_end = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// nk tracks the highest (closest-to-front) insertion position produced
// by the merge; it decides where scanning resumes next iteration.
idi nk = L;
// // Merge. Parallel merging in every two queues.
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_para(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
// Merge. Merge all queues in parallel.
{
if (num_threads_ > 1) {
idi r = merge_all_queues_para_array(
// local_queues_list,
local_queues_array,
local_queues_ends,
local_queue_length,
set_L,
L);
if (r < nk) {
nk = r;
}
} else {
if (local_queues_ends[0]) {
idi r = merge_two_queues_into_1st_queue_seq_fixed(
set_L,
0,
L,
// local_queues_list[0],
local_queues_array,
0,
local_queues_ends[0]);
local_queues_ends[0] = 0;
if (r < nk) {
nk = r;
}
}
}
}
// // Merge Sequentially
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[tid],
//// 0,
// local_queues_array,
// tid * local_queue_length,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
// Resume scanning from the highest inserted position, or just past the
// last candidate checked this round if nothing was inserted above it.
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
}
// Emit the ids of the best K candidates.
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i].id_;
}
{// Reset
is_visited.reset();
// std::fill(is_visited.begin(), is_visited.end(), 0);
// is_visited.clear_all();
std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
}
}
/**
 * Top-M parallel search variant that hosts every thread's local queue
 * inside set_L itself: thread 0 inserts directly into the sorted head
 * [0, L), while thread t>0 appends into its disjoint slice of set_L
 * starting at offsets_load_set_L[t]. After each expansion round the
 * non-empty slices are compacted into a dense prefix (memmove) and the
 * whole prefix is re-sorted instead of being merged queue-by-queue.
 *
 * NOTE(review): unlike the other variants, k is never advanced here (the
 * nk/last_k update at the bottom is commented out); termination relies
 * solely on the break taken when no unchecked candidate remains — confirm
 * this is the intended convergence criterion.
 *
 * Parameters mirror para_search_with_top_m_merge_queues_new_threshold,
 * plus:
 * dest_offsets        scratch for the prefix-sum of slice lengths.
 * offsets_load_set_L  start offset of each thread's slice within set_L.
 */
inline void Searching::para_search_with_top_m_merge_queues_by_sort(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
// std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends, // Sizes of local queue
std::vector<idi> &dest_offsets,
const std::vector<idi> &offsets_load_set_L, // Offsets for reading from set_L.
BitVector &is_visited)
{
// Mark seeds visited (atomic bit ops: this loop runs in parallel).
{
#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
// is_visited[init_ids[c_i]] = 1;
is_visited.atomic_set_bit(init_ids[c_i]);
}
}
const dataf *query_data = queries_load_ + query_id * dimension_;
// Warm the cache for the seed vertices' data blocks.
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
// ++count_distance_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i] = Candidate(v_id, dist, false); // False means not checked.
}
std::sort(set_L.begin(), set_L.begin() + L);
// boost::sort::block_indirect_sort(set_L.begin(), set_L.begin() + L, num_threads_);
local_queues_ends[0] = L; // Thread 0's queue is the sorted head of set_L.
std::vector<idi> top_m_candidates(M);
idi top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi tmp_count = 0; // for debug
// while(true) {
while (k < L) {
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
// for (idi c_i = 0; c_i < L && top_m_candidates_end < M; ++c_i) {
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
if (set_L[c_i].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[c_i].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
}
// No unchecked candidate left: search has converged.
if (!top_m_candidates_end) {
break;
}
// Push M candidates' neighbors into the queue.
#pragma omp parallel for
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
int tid = omp_get_thread_num();
// const idi local_queue_start = tid * local_queue_length;
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
// { // Sequential edition
// if (is_visited[nb_id]) {
// continue;
// }
// is_visited[nb_id] = 1;
// }
{// Self-defined BitVector
if (is_visited.atomic_is_bit_set(nb_id)) {
continue;
}
is_visited.atomic_set_bit(nb_id);
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// Prune: worse than the current L-th best cannot enter set_L.
if (dist > set_L[L-1].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Add to the local queue.
// Thread 0 inserts into the sorted head; others append to their slice.
if (0 == tid) {
add_into_queue(set_L, 0, local_queues_ends[0], L, cand);
} else {
add_into_queue(set_L, offsets_load_set_L[tid], local_queues_ends[tid], local_queue_length, cand);
}
// add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
}
}
top_m_candidates_end = 0; // Clear top_m_candidates
// Sort
{
// Single-threaded: everything already sits sorted in set_L's head.
if (num_threads_ == 1) {
continue;
}
std::copy(local_queues_ends.begin(), local_queues_ends.end(), dest_offsets.begin());
idi total_cands = PANNS::ParallelOps::prefix_sum_for_offsets(dest_offsets);
// Shrink sparse array into a dense array.
for (int i_t = 2; i_t < num_threads_; ++i_t) {
memmove(
set_L.data() + dest_offsets[i_t],
set_L.data() + offsets_load_set_L[i_t],
local_queues_ends[i_t] * sizeof(Candidate));
}
// Sort the array.
std::sort(set_L.begin(), set_L.begin() + total_cands);
// boost::sort::block_indirect_sort(set_L.begin(), set_L.begin() + total_cands, num_threads_);
// Reset
std::fill(local_queues_ends.begin() + 1, local_queues_ends.end(), 0);
}
// idi nk = L;
// // Merge. Merge all queues in parallel.
// {
// if (num_threads_ > 1) {
// idi r = merge_all_queues_para_array(
//// local_queues_list,
// local_queues_array,
// local_queues_ends,
// local_queue_length,
// set_L,
// L);
// if (r < nk) {
// nk = r;
// }
// } else {
// if (local_queues_ends[0]) {
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[0],
// local_queues_array,
// 0,
// local_queues_ends[0]);
// local_queues_ends[0] = 0;
// if (r < nk) {
// nk = r;
// }
// }
// }
// }
// if (nk <= last_k) {
// k = nk;
// } else {
// k = last_k + 1;
// }
}
// Emit the ids of the best K candidates.
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i].id_;
}
{// Reset
is_visited.clear_all();
std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
}
}
/**
 * Top-M parallel search variant using the self-defined BitVector with
 * atomic bit operations for the visited map (contrast with
 * para_search_with_top_m_merge_queues_new_threshold, which uses a plain
 * boost::dynamic_bitset without synchronization). Structure is otherwise
 * identical: select up to M unchecked candidates, expand their neighbors
 * in parallel into per-thread slices of local_queues_array, merge all
 * local queues back into the global sorted queue set_L, repeat until the
 * first unchecked index k reaches L, then emit the top K ids into set_K.
 */
inline void Searching::para_search_with_top_m_merge_queues_myths(
const idi M,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_length, // Maximum size of local queue
// std::vector< std::vector<Candidate> > &local_queues_list,
std::vector<Candidate> &local_queues_array,
std::vector<idi> &local_queues_ends, // Sizes of local queue
BitVector &is_visited)
// std::vector<uint8_t> &is_visited)
// boost::dynamic_bitset<> &is_visited)
{
// Mark all seed vertices as visited (atomically; loop is parallel).
{
#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited.atomic_set_bit(init_ids[c_i]);
}
}
const dataf *query_data = queries_load_ + query_id * dimension_;
// Warm the cache for the seed vertices' data blocks.
#pragma omp parallel for
for (idi v_i = 0; v_i < L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
// ++count_distance_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i] = Candidate(v_id, dist, false); // False means not checked.
}
std::sort(set_L.begin(), set_L.begin() + L);
std::vector<idi> top_m_candidates(M);
idi top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi tmp_count = 0; // for debug
while (k < L) {
++tmp_count;
// {//test
// printf("tmp_count: %d\n", tmp_count);
// }
// Select M candidates
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
if (set_L[c_i].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[c_i].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
}
// Push M candidates' neighbors into the queue.
#pragma omp parallel for
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
int tid = omp_get_thread_num();
const idi local_queue_start = tid * local_queue_length;
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{// Self-defined BitVector
if (is_visited.atomic_is_bit_set(nb_id)) {
continue;
}
is_visited.atomic_set_bit(nb_id);
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// Prune: worse than the current L-th best cannot enter set_L.
if (dist > set_L[L-1].distance_) {
continue;
}
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
Candidate cand(nb_id, dist, false);
// Add to the local queue.
add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand);
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
}
}
top_m_candidates_end = 0; // Clear top_m_candidates
// {// Print all sizes of local queues
// printf("query%u:iter: %u", query_id, tmp_count);
// for (int i_t = 0; i_t < num_threads_; ++i_t) {
// printf(" [%u]: %u", i_t, local_queues_ends[i_t]);
// }
// printf("\n");
// }
// nk tracks the highest insertion position produced by the merge.
idi nk = L;
// // Merge. Parallel merging in every two queues.
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_para(
// set_L,
// 0,
// L,
// local_queues_list[tid],
// 0,
// local_queues_ends[tid]);
//// idi r = merge_two_queues_into_1st_queue_seq(
//// set_L,
//// 0,
//// L,
//// local_queues_list[tid],
//// 0,
//// local_queues_ends[tid]);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
// {// text
// if (query_id == 4 &&
// tmp_count == 5) {
// // Print local queues
// for (int t_i = 0; t_i < num_threads_; ++t_i) {
// idi start_i = t_i * local_queue_length;
// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
// printf("t[%u][%u]: "
// "id: %u "
// "dist: %f\n",
// t_i, q_i,
// local_queues_array[q_i + start_i].id_,
// local_queues_array[q_i + start_i].distance_);
// }
// }
// printf("----------\n");
// for (idi i = 0; i < L; ++i) {
// printf("set_L[%u]: "
// "id: %u "
// "dist: %f\n",
// i,
// set_L[i].id_,
// set_L[i].distance_);
// }
// printf("----------\n");
// }
// }
// Merge. Merge all queues in parallel.
{
if (num_threads_ > 1) {
idi r = merge_all_queues_para_array(
// local_queues_list,
local_queues_array,
local_queues_ends,
local_queue_length,
set_L,
L);
if (r < nk) {
nk = r;
}
} else {
if (local_queues_ends[0]) {
idi r = merge_two_queues_into_1st_queue_seq_fixed(
set_L,
0,
L,
// local_queues_list[0],
local_queues_array,
0,
local_queues_ends[0]);
local_queues_ends[0] = 0;
if (r < nk) {
nk = r;
}
}
}
}
// // Merge Sequentially
// {
// for (int tid = 0; tid < num_threads_; ++tid) {
// if (0 == local_queues_ends[tid]) continue;
// idi r = merge_two_queues_into_1st_queue_seq_fixed(
// set_L,
// 0,
// L,
//// local_queues_list[tid],
//// 0,
// local_queues_array,
// tid * local_queue_length,
// local_queues_ends[tid]);
//// L + 1);
// local_queues_ends[tid] = 0; // Reset the local queue
// if (r < nk) {
// nk = r;
// }
// }
// }
// Resume scanning from the highest inserted position, or just past the
// last candidate checked this round if nothing was inserted above it.
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
}
// Emit the ids of the best K candidates.
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// is_visited.reset();
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.clear_all();
std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
}
// {
// exit(1);
// }
// {
// if (query_id == 3) {
// exit(1);
// }
// }
}
/**
 * Batched top-M search: processes batch_size queries (starting at
 * batch_start) together so that a candidate vertex selected by several
 * queries in the same round has its adjacency list read only once.
 *
 * Each round: every unfinished query picks up to M unchecked candidates
 * from its own queue; candidates are deduplicated into a shared
 * joint_queue, and cands_query_ids maps each candidate back to the
 * queries that selected it. Then each joint candidate's neighbors are
 * pushed into the queues of exactly those queries. A query is finished
 * once its first-unchecked index reaches L.
 *
 * Note: despite the name, the main loops run sequentially here — the
 * omp pragmas are commented out.
 *
 * set_L_list / set_K_list / is_visited_list hold one queue / result
 * array / visited bitmap per query in the batch.
 */
inline void Searching::para_search_with_top_m_in_batch_embarassing_para(
const PANNS::idi M,
const PANNS::idi batch_start,
const PANNS::idi batch_size,
const PANNS::idi K,
const PANNS::idi L,
std::vector< std::vector<Candidate> > &set_L_list,
const std::vector<idi> &init_ids,
std::vector< std::vector<idi> > &set_K_list,
std::vector< boost::dynamic_bitset<> > &is_visited_list)
{
// std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));
// std::vector< std::vector<bool> > is_visited_list(batch_size, std::vector<bool>(num_v_));
// Prepare the init_ids
{
//#pragma omp parallel for
for (idi q_i = 0; q_i < batch_size; ++q_i) {
auto &is_visited = is_visited_list[q_i];
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = true;
}
}
}
// Initialize set_L_list
{
//#pragma omp parallel for
for (idi q_i = 0; q_i < batch_size; ++q_i) {
const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
for (idi i = 0; i < L; i++) {
idi v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
// ++count_distance_computation_;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
}
std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
}
}
{
std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
idi joint_queue_end = 0;
boost::dynamic_bitset<> is_in_joint_queue(num_v_);
// std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
// std::vector<idi> cands_query_ids_ends(num_v_, 0);
std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
std::vector<idi> queries_not_finished(batch_size);
idi queries_not_finished_end = batch_size;
for (idi q_i = 0; q_i < batch_size; ++q_i) {
queries_not_finished[q_i] = q_i;
}
bool is_finished = false;
idi counter_for_debug = 0;
while (!is_finished) {
++counter_for_debug;
// Build the new joint queue
// Traverse every query's queue
for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
idi q_local_id = queries_not_finished[q_i];
// last_ks[q_local_id] = L;
auto &set_L = set_L_list[q_local_id];
idi top_m_count = 0;
for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
if (set_L[c_i].is_checked_) {
continue;
}
set_L[c_i].is_checked_ = true;
last_ks[q_local_id] = c_i;
++top_m_count;
idi cand_id = set_L[c_i].id_;
// Record which query selected cand_id
auto tmp_c = cands_query_ids.find(cand_id);
if (tmp_c != cands_query_ids.end()) {
tmp_c->second.push_back(q_local_id);
} else {
cands_query_ids.emplace(cand_id, std::vector<idi>());
cands_query_ids[cand_id].reserve(batch_size);
cands_query_ids[cand_id].push_back(q_local_id);
}
// cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
// Add candidate cand_id into the joint queue (deduplicated).
if (is_in_joint_queue[cand_id]) {
continue;
}
is_in_joint_queue[cand_id] = true;
joint_queue[joint_queue_end++] = cand_id;
}
}
queries_not_finished_end = 0; // Clear queries_not_finished
// Traverse every shared candidate
for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
idi cand_id = joint_queue[c_i];
is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
const auto &query_local_ids = cands_query_ids[cand_id];
// Push neighbors to every queue of the queries that selected cand_id.
// Traverse cand_id's neighbors
// idi &q_i_bound = cands_query_ids_ends[cand_id];
// for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
// idi q_local_id = query_local_ids[q_i];
for (idi q_local_id : query_local_ids) {
dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
auto &is_visited = is_visited_list[q_local_id];
auto &set_L = set_L_list[q_local_id];
// // Traverse cand_id's neighbors
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = true;
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
// ++count_distance_computation_;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// Prune: cannot displace the current L-th best of this query.
if (dist > set_L[L-1].distance_) {
continue;
}
// if (dist >= set_L[L-1].distance_) {
// continue;
// }
Candidate new_cand(nb_id, dist, false);
idi insert_loc = insert_into_queue(set_L, L, new_cand);
if (insert_loc < nks[q_local_id]) {
nks[q_local_id] = insert_loc;
}
}
}
cands_query_ids.erase(cand_id);
// q_i_bound = 0; // Clear cands_query_ids[cand_id]
}
joint_queue_end = 0; // Clear joint_queue
// Advance every query's scan position; requeue unfinished queries.
for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
if (nks[q_local_id] <= last_ks[q_local_id]) {
ks[q_local_id] = nks[q_local_id];
} else {
ks[q_local_id] = last_ks[q_local_id] + 1;
}
nks[q_local_id] = L;
last_ks[q_local_id] = L;
if (ks[q_local_id] < L) {
queries_not_finished[queries_not_finished_end++] = q_local_id;
}
}
if (!queries_not_finished_end) {
is_finished = true;
}
}
}
// Emit the best K ids for every query in the batch.
{
for (idi q_i = 0; q_i < batch_size; ++q_i) {
for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
}
}
}
////
// {//test
// for (idi q_i = 0; q_i < batch_size; ++q_i) {
// printf("query: %u\n", q_i + batch_start);
// for (idi c_i = 0; c_i < K; ++c_i) {
// printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
// }
// }
// }
{// Reset is_visited_list
for (idi q_i = 0; q_i < batch_size; ++q_i) {
is_visited_list[q_i].reset();
}
}
}
// DEPRECATED. No enough workload for OpenMP, and hard to implement efficiently.
///**
// * Prepare init_ids and flags, as they are constant for all queries.
// * @param[out] init_ids
// * @param L
// */
//inline void Searching::para_prepare_init_ids(
// std::vector<unsigned int> &init_ids,
// unsigned L) const
//{
//// idi num_ngbrs = get_out_degree(ep_);
//// edgei edge_start = nsg_graph_indices_[ep_];
//// // Store ep_'s neighbors as candidates
//// idi tmp_l = 0;
//// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//// }
//// std::unordered_set<idi> visited_ids;
// std::vector<uint8_t> is_selected(num_v_, 0);
//// boost::dynamic_bitset<> is_selected(num_v_);
// idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
// idi out_degree = *out_edges++;
// idi init_ids_end = 0;
//// idi e_i_bound = out_degree <= L ? out_degree : L;
//#pragma omp parallel for
// for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//// for (idi e_i = 0; e_i < e_i_bound; ++e_i) {
// idi v_id = out_edges[e_i];
//// if(is_selected[v_id]) {
//// continue;
//// }
//// is_selected[v_id] = 1;
//
// if (!AtomicOps::CAS(is_selected.data() + v_id,
// static_cast<uint8_t>(0),
// static_cast<uint8_t>(1))) {
// continue;
// }
//
//// init_ids[init_ids_end++] = v_id;
// volatile idi old_v = init_ids_end;
// volatile idi new_v = old_v + 1;
// while (!AtomicOps::CAS(&init_ids_end, old_v, new_v)) {
// old_v = init_ids_end;
// new_v = old_v + 1;
// }
// init_ids[old_v] = v_id;
// }
//
//// for (idi i = 0; i < tmp_l; ++i) {
//// is_visited[init_ids[i]] = true;
//// }
//
// // If ep_'s neighbors are not enough, add other random vertices
// idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
// while (init_ids_end < L) {
// tmp_id %= num_v_;
// idi v_id = tmp_id++;
// if (is_selected[v_id]) {
// continue;
// }
//// if (visited_ids.find(id) != visited_ids.end()) {
//// continue;
//// }
// is_selected[v_id] = 1;
//// visited_ids.insert(id);
// init_ids[init_ids_end++] = v_id;
//// tmp_l++;
// }
//}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store the difference X - Y of two `struct timeval' values in RESULT.
 *
 * NOTE: Y is used as scratch space and may be modified by this call.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y until its microsecond field no longer
       exceeds x's, so the usec subtraction below cannot underflow. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Conversely, carry whole seconds out of an oversized usec gap. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* y is now normalized: the componentwise difference is well-formed
       and tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative iff x's (adjusted) seconds fall short of y's. */
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates two time planes of an Nz x Ny x Nx grid,
 * runs the 7-point Jacobi stencil Nt-1 steps, TESTS times, and reports
 * the per-run and minimum wall-clock times.
 *
 * Usage: prog NX NY NZ [NT]  (a halo of 1 is added to each dimension)
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    /* Default to a small problem so the run is well-defined even without
     * command-line arguments (the original read Nx..Nt uninitialized in
     * that case, which is undefined behavior). */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 2;   /* +2 for the boundary layers */
        Ny = atoi(argv[2]) + 2;
        Nz = atoi(argv[3]) + 2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* Two time planes, each a ragged Nz x Ny x Nx array. */
    double ****A = (double ****) malloc(sizeof(double***) * 2);
    A[0] = (double ***) malloc(sizeof(double**) * Nz);
    A[1] = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double**) malloc(sizeof(double*) * Ny);
        A[1][i] = (double**) malloc(sizeof(double*) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
            A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
        }
    }

    /* Tile size information, including extra element to decide the list
     * length.  The list is modified here before source-to-source
     * transformations. */
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = (int*) realloc((void *)tile_size, sizeof(int) * 5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 16;
    tile_size[3] = 512;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;
    const double alpha = 0.0876;
    const double beta = 0.0765;

    /* Initialize the full grid, boundaries included: the stencil reads
     * the index-0 planes, and from t = 1 on it also reads the boundary
     * planes of the second time copy, so BOTH planes must be fully
     * defined. (The original started these loops at 1 and never touched
     * A[1], leaving uninitialized reads in the kernel.) */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* Run the kernel TESTS times and keep the fastest wall-clock time. */
    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
                            + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                                      A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the original called the undeclared lowercase `min`;
         * the macro defined at the top of this file is MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return; /* negative-interval flag; unused by this harness */

    PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (Causing performance degradation
    /*  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
    free(A[0][i][j]);
    free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    */
    return 0;
}
|
rpmio_internal.h | #ifndef H_RPMIO_INTERNAL
#define H_RPMIO_INTERNAL
/** \ingroup rpmio
* \file rpmio/rpmio_internal.h
*/
#include <rpmiotypes.h>
#include <rpmlog.h>
#include <rpmio.h>
#include <rpmurl.h>
#define _RPMPGP_INTERNAL
#include <rpmpgp.h>
#include <rpmxar.h>
/*@access pgpDig @*/ /* XXX FIXME: (by refactoring to foo.c) */
/*@access rpmxar @*/ /* XXX FIXME: (by refactoring to foo.c) */
/** \ingroup rpmio
 * One layer of an FD_t's I/O handler stack: the handler vtable (io),
 * the backend cookie/stream pointer (fp) and the file descriptor number.
 */
typedef struct _FDSTACK_s {
/*@exposed@*/
FDIO_t io;
/*@dependent@*/
void * fp;
int fdno;
} FDSTACK_t;
/** \ingroup rpmio
 * Identify per-descriptor I/O operation statistics.
 */
typedef enum fdOpX_e {
FDSTAT_READ = 0, /*!< Read statistics index. */
FDSTAT_WRITE = 1, /*!< Write statistics index. */
FDSTAT_SEEK = 2, /*!< Seek statistics index. */
FDSTAT_CLOSE = 3, /*!< Close statistics index */
FDSTAT_DIGEST = 4, /*!< Digest statistics index. */
FDSTAT_MAX = 5
} fdOpX;
/** \ingroup rpmio
 * Cumulative statistics for a descriptor, one slot per fdOpX operation.
 */
typedef /*@abstract@*/ struct {
struct rpmop_s ops[FDSTAT_MAX]; /*!< Cumulative statistics. */
} * FDSTAT_t;
/** \ingroup rpmio
 * A digest context attached to a descriptor.
 */
typedef struct _FDDIGEST_s {
DIGEST_CTX hashctx;
} * FDDIGEST_t;
/** \ingroup rpmio
 * The FD_t File Handle data structure.
 */
struct _FD_s {
struct rpmioItem_s _item; /*!< usage mutex and pool identifier. */
int flags; /*!< open flags plus RPMIO_DEBUG_* bits. */
#define RPMIO_DEBUG_IO 0x40000000
#define RPMIO_DEBUG_REFS 0x20000000
int magic; /*!< sanity marker, checked by FDSANE(). */
#define FDMAGIC 0x04463138
int nfps; /*!< index of the current top of the fps[] stack. */
FDSTACK_t fps[8]; /*!< stack of layered I/O handlers. */
/*@dependent@*/ /*@relnull@*/
void * u; /* ufdio: URL info */
/*@relnull@*/
void * req; /* ufdio: HTTP request */
int rd_timeoutsecs; /* ufdRead: per FD_t timer */
ssize_t bytesRemain; /* ufdio: */
ssize_t contentLength; /* ufdio: */
int persist; /* ufdio: */
int wr_chunked; /* ufdio: */
int syserrno; /* last system errno encountered */
/*@observer@*/
const void *errcookie; /* gzdio/bzdio/ufdio: */
/* NOTE(review): annotation was written "/*null@" in the original;
 * normalized to the splint form used elsewhere in this header. */
/*@null@*/
const char *opath; /* open(2) args. */
int oflags;
mode_t omode;
/*@refcounted@*/ /*@relnull@*/
rpmxar xar; /* xar archive wrapper */
/*@refcounted@*/ /*@relnull@*/
pgpDig dig; /* signature parameters */
FDSTAT_t stats; /* I/O statistics */
size_t ndigests;
DIGEST_CTX *digests;
/*@null@*/
const char *contentType; /* ufdio: (HTTP) */
/*@null@*/
const char *contentDisposition; /* ufdio: (HTTP) */
time_t lastModified; /* ufdio: (HTTP) */
int ftpFileDoneNeeded; /* ufdio: (FTP) */
unsigned long long fd_cpioPos; /* cpio: */
#if defined(__LCLINT__)
/*@refs@*/
int nrefs; /*!< (unused) keep splint happy */
#endif
};
/*@access FD_t@*/
/* Validate an FD_t handle: non-NULL and carrying the expected magic. */
#define FDSANE(fd) assert(fd != NULL && fd->magic == FDMAGIC)
/* Conditional debug trace: fires when mask _m is set either globally
 * (_rpmio_debug) or in the descriptor _f's flags; _x is a parenthesized
 * fprintf argument list. */
#define DBG(_f, _m, _x) \
/*@-modfilesys@*/ \
if ((_rpmio_debug | ((_f) ? ((FD_t)(_f))->flags : 0)) & (_m)) fprintf _x \
/*@=modfilesys@*/
#if defined(__LCLINT__XXX)
#define DBGIO(_f, _x)
#define DBGREFS(_f, _x)
#else
/* I/O-level and reference-count-level debug tracing. */
#define DBGIO(_f, _x) DBG((_f), RPMIO_DEBUG_IO, _x)
#define DBGREFS(_f, _x) DBG((_f), RPMIO_DEBUG_REFS, _x)
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** \ingroup rpmio
 * Format a one-line debug description of a descriptor
 * (presumably for DBGIO/DBGREFS traces — confirm against rpmio.c).
 */
/*@observer@*/ const char * fdbg(/*@null@*/ FD_t fd)
/*@*/;
/** \ingroup rpmio
 * Read a line of at most len bytes from fd into buf
 * (fgets-style — confirm termination semantics against rpmio.c).
 */
int fdFgets(FD_t fd, char * buf, size_t len)
/*@globals errno, fileSystem @*/
/*@modifies *buf, fd, errno, fileSystem @*/;
/** \ingroup rpmio
 * Open an FTP URL; the parsed url info is returned through *uret.
 */
/*@null@*/ FD_t ftpOpen(const char *url, /*@unused@*/ int flags,
/*@unused@*/ mode_t mode, /*@out@*/ urlinfo *uret)
/*@globals h_errno, fileSystem, internalState @*/
/*@modifies *uret, fileSystem, internalState @*/;
/** \ingroup rpmio
 * Issue an FTP command (ftpCmd with argument ftpArg) on a data channel.
 */
int ftpReq(FD_t data, const char * ftpCmd, const char * ftpArg)
/*@globals fileSystem, internalState @*/
/*@modifies data, fileSystem, internalState @*/;
/** \ingroup rpmio
 * Execute an FTP command against a URL with an optional second argument.
 */
int ftpCmd(const char * cmd, const char * url, const char * arg2)
/*@globals h_errno, fileSystem, internalState @*/
/*@modifies fileSystem, internalState @*/;
/** \ingroup rpmio
 * Close a descriptor previously opened through the ufdio layer;
 * consumes (frees) the cookie.
 */
int ufdClose( /*@only@*/ void * cookie)
/*@globals fileSystem, internalState @*/
/*@modifies cookie, fileSystem, internalState @*/;
/** \ingroup rpmio
 * Record the open(2) arguments (path, flags, mode) on the descriptor,
 * releasing any previously stored path first.
 */
/*@unused@*/ static inline
void fdSetOpen(FD_t fd, const char * path, int flags, mode_t mode)
/*@modifies fd @*/
{
FDSANE(fd);
if (fd->opath != NULL) {
free((void *)fd->opath);
fd->opath = NULL;
}
fd->opath = xstrdup(path);
fd->oflags = flags;
fd->omode = mode;
}
/** \ingroup rpmio
 * Return the path recorded by fdSetOpen() (may be NULL).
 */
/*@unused@*/ static inline
/*@null@*/ /*@observer@*/ const char * fdGetOPath(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->opath;
}
/** \ingroup rpmio
 * Return the open(2) flags recorded by fdSetOpen().
 */
/*@unused@*/ static inline
int fdGetOFlags(FD_t fd)
/*@*/
{
FDSANE(fd);
return fd->oflags;
}
/** \ingroup rpmio
 * Return the open(2) mode recorded by fdSetOpen().
 */
/*@unused@*/ static inline
mode_t fdGetOMode(FD_t fd)
	/*@*/
{
    mode_t omode;
    FDSANE(fd);
    omode = fd->omode;
    return omode;
}
/** \ingroup rpmio
 * Attach OpenPGP digest parameters to fd, replacing any set already
 * attached.
 */
/*@unused@*/ static inline
void fdSetDig(FD_t fd, pgpDig dig)
	/*@globals fileSystem @*/
	/*@modifies fd, dig, fileSystem @*/
{
    FDSANE(fd);
/*@-assignexpose -castexpose @*/
    /* Drop the reference on the old dig (pgpDigFree presumably returns
       NULL after releasing -- rpm refcount convention, confirm), then
       take a new reference on the caller's dig. */
    fd->dig = pgpDigFree(fd->dig);
    fd->dig = pgpDigLink(dig);
/*@=assignexpose =castexpose @*/
}
/** \ingroup rpmio
 * Return the OpenPGP digest parameters attached to fd (NULL if none).
 * The reference is exposed, not duplicated -- hence the splint overrides.
 */
/*@unused@*/ static inline
/*@null@*/ pgpDig fdGetDig(FD_t fd)
	/*@*/
{
    FDSANE(fd);
/*@-compdef -retexpose -refcounttrans -usereleased @*/
    return fd->dig;
/*@=compdef =retexpose =refcounttrans =usereleased @*/
}
/** \ingroup rpmio
 * Attach a reference-counted xar archive handle to fd.
 * NOTE(review): unlike fdSetDig(), a handle already in fd->xar is not
 * released before being overwritten -- confirm callers attach at most
 * once per fd, otherwise a reference is leaked.
 */
/*@unused@*/ static inline
void fdSetXAR(FD_t fd, rpmxar xar)
	/*@globals fileSystem @*/
	/*@modifies fd, xar, fileSystem @*/
{
    FDSANE(fd);
/*@-assignexpose -castexpose @*/
    fd->xar = rpmxarLink(xar, "fdSetXAR");
/*@=assignexpose =castexpose @*/
}
/** \ingroup rpmio
 * Return the xar archive handle attached to fd (NULL if none).
 * The reference is exposed, not duplicated -- hence the splint overrides.
 */
/*@unused@*/ static inline
/*@null@*/ rpmxar fdGetXAR(FD_t fd)
	/*@*/
{
    FDSANE(fd);
/*@-compdef -refcounttrans -retexpose -usereleased @*/
    return fd->xar;
/*@=compdef =refcounttrans =retexpose =usereleased @*/
}
/** \ingroup rpmio
 * Return the I/O vector of the current (top) fps stack entry.
 */
/*@unused@*/ static inline
/*@null@*/ FDIO_t fdGetIo(FD_t fd)
	/*@*/
{
    FDIO_t io;
    FDSANE(fd);
    io = fd->fps[fd->nfps].io;
    return io;
}
/** \ingroup rpmio
 * Set the I/O vector of the current (top) fps stack entry.
 */
/*@-nullstate@*/ /* FIX: io may be NULL */
/*@unused@*/ static inline
void fdSetIo(FD_t fd, /*@kept@*/ /*@null@*/ FDIO_t io)
	/*@modifies fd @*/
{
    FDSANE(fd);
/*@-assignexpose@*/
    fd->fps[fd->nfps].io = io;
/*@=assignexpose@*/
}
/*@=nullstate@*/
/** \ingroup rpmio
 * Return the current fps entry's cookie cast to a stdio FILE* (may be
 * NULL; meaningful only when the current layer is stdio-backed --
 * callers are expected to know the layer type).
 */
/*@unused@*/ static inline
/*@exposed@*/ /*@dependent@*/ /*@null@*/ FILE * fdGetFILE(FD_t fd)
	/*@*/
{
    FDSANE(fd);
/*@+voidabstract@*/
    return ((FILE *)fd->fps[fd->nfps].fp);
/*@=voidabstract@*/
}
/** \ingroup rpmio
 * Return the opaque stream cookie of the current (top) fps stack entry.
 */
/*@unused@*/ static inline
/*@exposed@*/ /*@dependent@*/ /*@null@*/ void * fdGetFp(FD_t fd)
	/*@*/
{
    void * fp;
    FDSANE(fd);
    fp = fd->fps[fd->nfps].fp;
    return fp;
}
/** \ingroup rpmio
 * Set the opaque stream cookie of the current (top) fps stack entry.
 */
/*@-nullstate@*/ /* FIX: fp may be NULL */
/*@unused@*/ static inline
void fdSetFp(FD_t fd, /*@kept@*/ /*@null@*/ void * fp)
	/*@modifies fd @*/
{
    FDSANE(fd);
/*@-assignexpose@*/
    fd->fps[fd->nfps].fp = fp;
/*@=assignexpose@*/
}
/*@=nullstate@*/
/** \ingroup rpmio
 * Return the file descriptor number of the current (top) fps stack entry.
 */
/*@unused@*/ static inline
int fdGetFdno(FD_t fd)
	/*@*/
{
    int fdno;
    FDSANE(fd);
    fdno = fd->fps[fd->nfps].fdno;
    return fdno;
}
/** \ingroup rpmio
 * Set the file descriptor number of the current (top) fps stack entry.
 */
/*@unused@*/ static inline
void fdSetFdno(FD_t fd, int fdno)
	/*@modifies fd @*/
{
    int top;
    FDSANE(fd);
    top = fd->nfps;
    fd->fps[top].fdno = fdno;
}
/** \ingroup rpmio
 * Record the expected transfer size on fd.  bytesRemain starts at the
 * same value and is counted down by fdstat_exit() as data moves.
 */
/*@unused@*/ static inline
void fdSetContentLength(FD_t fd, ssize_t contentLength)
	/*@modifies fd @*/
{
    FDSANE(fd);
    fd->contentLength = contentLength;
    fd->bytesRemain = contentLength;
}
/** \ingroup rpmio
 * Push a new I/O layer (io vector, stream cookie, descriptor) onto the
 * fps stack.  Silently does nothing when the stack is already full.
 */
/*@unused@*/ static inline
void fdPush(FD_t fd, FDIO_t io, void * fp, int fdno)
	/*@modifies fd @*/
{
    const int max_level = (int)(sizeof(fd->fps)/sizeof(fd->fps[0]) - 1);
    FDSANE(fd);
    if (fd->nfps < max_level) {
	fd->nfps++;
	fdSetIo(fd, io);
	fdSetFp(fd, fp);
	fdSetFdno(fd, fdno);
    }
}
/** \ingroup rpmio
 * Pop the top entry of the fps I/O-layer stack, clearing its fields first.
 * NOTE(review): the guard only rejects an already-negative nfps, so a pop
 * at level 0 leaves nfps == -1 and any later fps[fd->nfps] access would
 * index out of bounds -- confirm callers never pop the base layer.
 */
/*@unused@*/ static inline
void fdPop(FD_t fd)
	/*@modifies fd @*/
{
    FDSANE(fd);
    if (fd->nfps < 0) return;
    fdSetIo(fd, NULL);
    fdSetFp(fd, NULL);
    fdSetFdno(fd, -1);
    fd->nfps--;
}
/** \ingroup rpmio
 * Return the rpmsw accumulator for operation opx, or NULL when fd or its
 * stats are missing or opx is out of range.
 */
/*@unused@*/ static inline /*@null@*/
rpmop fdstat_op(/*@null@*/ FD_t fd, fdOpX opx)
	/*@*/
{
    if (fd == NULL || fd->stats == NULL)
	return NULL;
    if ((int)opx < 0 || opx >= FDSTAT_MAX)
	return NULL;
    return fd->stats->ops + opx;
}
/** \ingroup rpmio
 * Open a timing interval for operation opx.  No-op when fd is NULL or
 * carries no statistics.
 */
/*@unused@*/ static inline
void fdstat_enter(/*@null@*/ FD_t fd, int opx)
	/*@globals internalState @*/
	/*@modifies internalState @*/
{
    if (fd != NULL && fd->stats != NULL)
	(void) rpmswEnter(fdstat_op(fd, opx), 0);
}
/** \ingroup rpmio
 * Close a timed I/O operation opened by fdstat_enter():
 *  - on failure (rc == -1) capture errno into fd->syserrno;
 *  - on a successful read/write, count the transferred bytes against
 *    bytesRemain (only while a positive budget remains);
 *  - finally close out the rpmsw timing interval.
 */
/*@unused@*/ static inline
void fdstat_exit(/*@null@*/ FD_t fd, int opx, ssize_t rc)
	/*@globals internalState @*/
	/*@modifies fd, internalState @*/
{
    if (fd == NULL) return;
    if (rc == -1)
	fd->syserrno = errno;
    else if (rc > 0 && fd->bytesRemain > 0)
	switch (opx) {
	case FDSTAT_READ:
	case FDSTAT_WRITE:
	    fd->bytesRemain -= rc;
	    break;
	default:
	    break;
	}
    if (fd->stats != NULL)
	(void) rpmswExit(fdstat_op(fd, opx), rc);
}
/** \ingroup rpmio
 * Print accumulated read/write statistics for fd to fp, each line
 * prefixed with msg when non-NULL.  Only the first four op slots are
 * examined; seek and close currently print nothing.
 */
/*@unused@*/ static inline
void fdstat_print(/*@null@*/ FD_t fd, const char * msg, FILE * fp)
	/*@globals fileSystem @*/
	/*@modifies *fp, fileSystem @*/
{
    /* const: a fixed usecs-per-second conversion factor, not state. */
    static const int usec_scale = (1000*1000);
    int opx;

    if (fd == NULL || fd->stats == NULL) return;
    for (opx = 0; opx < 4; opx++) {
	rpmop op = &fd->stats->ops[opx];
	if (op->count <= 0) continue;

	switch (opx) {
	case FDSTAT_READ:
	    if (msg != NULL) fprintf(fp, "%s:", msg);
	    fprintf(fp, "%8d reads, %8lu total bytes in %d.%06d secs\n",
		op->count, (unsigned long)op->bytes,
		(int)(op->usecs/usec_scale), (int)(op->usecs%usec_scale));
	    /*@switchbreak@*/ break;
	case FDSTAT_WRITE:
	    if (msg != NULL) fprintf(fp, "%s:", msg);
	    fprintf(fp, "%8d writes, %8lu total bytes in %d.%06d secs\n",
		op->count, (unsigned long)op->bytes,
		(int)(op->usecs/usec_scale), (int)(op->usecs%usec_scale));
	    /*@switchbreak@*/ break;
	case FDSTAT_SEEK:
	    /*@switchbreak@*/ break;
	case FDSTAT_CLOSE:
	    /*@switchbreak@*/ break;
	}
    }
}
/** \ingroup rpmio
 * Record a system error number and the caller's opaque error cookie on
 * fd (presumably consulted later by error-reporting code -- confirm).
 */
/*@unused@*/ static inline
void fdSetSyserrno(FD_t fd, int syserrno, /*@kept@*/ const void * errcookie)
	/*@modifies fd @*/
{
    FDSANE(fd);
    fd->syserrno = syserrno;
/*@-assignexpose@*/
    fd->errcookie = errcookie;
/*@=assignexpose@*/
}
/** \ingroup rpmio
 * Return the read-timeout (in seconds) configured on fd.
 */
/*@unused@*/ static inline
int fdGetRdTimeoutSecs(FD_t fd)
	/*@*/
{
    int secs;
    FDSANE(fd);
    secs = fd->rd_timeoutsecs;
    return secs;
}
/** \ingroup rpmio
 * Return the current cpio payload position of fd.
 */
/*@unused@*/ static inline
unsigned long long fdGetCpioPos(FD_t fd)
	/*@*/
{
    unsigned long long pos;
    FDSANE(fd);
    pos = fd->fd_cpioPos;
    return pos;
}
/** \ingroup rpmio
 * Set the cpio payload position of fd.
 * NOTE(review): the parameter is a signed long while fdGetCpioPos()
 * reads the field back as unsigned long long -- positions beyond
 * LONG_MAX cannot be expressed through this setter; confirm intended.
 */
/*@unused@*/ static inline
void fdSetCpioPos(FD_t fd, long int cpioPos)
	/*@modifies fd @*/
{
    FDSANE(fd);
    fd->fd_cpioPos = cpioPos;
}
/** \ingroup rpmio
 * Convert a stdio-style opaque cookie back into an FD_t handle.
 * FDSANE() asserts the cookie really is a live FD_t (non-NULL with a
 * valid magic) -- hence the mayexit annotation.
 */
/*@mayexit@*/ /*@unused@*/ static inline
FD_t c2f(/*@null@*/ void * cookie)
	/*@*/
{
/*@-castexpose@*/
    FD_t fd = (FD_t) cookie;
/*@=castexpose@*/
    FDSANE(fd);
    /*@-refcounttrans -retalias@*/ return fd; /*@=refcounttrans =retalias@*/
}
/** \ingroup rpmio
 * Attach digest to fd.
 * Grows the per-fd digest array by one slot and starts a new digest
 * context for hashalgo; time spent is charged to the DIGEST stat op.
 * (xrealloc presumably aborts on allocation failure -- rpm convention.)
 */
/*@unused@*/ static inline
void fdInitDigest(FD_t fd, pgpHashAlgo hashalgo, int flags)
	/*@globals internalState @*/
	/*@modifies fd, internalState @*/
{
/*@+voidabstract@*/
    fd->digests = xrealloc(fd->digests,
			(fd->ndigests + 1) * sizeof(*fd->digests));
/*@=voidabstract@*/
    fdstat_enter(fd, FDSTAT_DIGEST);
    fd->digests[fd->ndigests++] = rpmDigestInit(hashalgo, flags);
    fdstat_exit(fd, FDSTAT_DIGEST, 0);
}
/** \ingroup rpmio
 * Turn the most recently attached digest into an HMAC computation keyed
 * with key[0..keylen).  No-op when key is NULL or no digest is attached.
 */
/*@unused@*/ static inline
void fdInitHmac(FD_t fd, const void * key, size_t keylen)
	/*@globals internalState @*/
	/*@modifies internalState @*/
{
    if (key == NULL || fd->digests == NULL || fd->ndigests <= 0)
	return;
    (void) rpmHmacInit(fd->digests[fd->ndigests-1], key, keylen);
}
/** \ingroup rpmio
 * Update digest(s) attached to fd.
 * Feeds buf[0..buflen) to every active digest context.  The contexts are
 * independent, so when OpenMP is enabled and more than one digest is
 * attached they are updated in parallel.
 */
/*@unused@*/ static inline
void fdUpdateDigests(FD_t fd, const unsigned char * buf, ssize_t buflen)
	/*@globals internalState @*/
	/*@modifies fd, internalState @*/
{
    int i;

    if (fd->ndigests > 0 && buf != NULL && buflen > 0) {
	fdstat_enter(fd, FDSTAT_DIGEST);
#if defined(_OPENMP)
#pragma omp parallel for if (fd->ndigests > 1)
#endif
	for (i = fd->ndigests - 1; i >= 0; i--) {
	    DIGEST_CTX ctx = fd->digests[i];
	    if (ctx == NULL)	/* slot may have been finalized/stolen */
		continue;
	    (void) rpmDigestUpdate(ctx, buf, buflen);
	}
	fdstat_exit(fd, FDSTAT_DIGEST, buflen);
    }
}
/** \ingroup rpmio
 * Finalize and detach the digest for hashalgo, storing the result via
 * *datap/*lenp (asAscii selects hex encoding per rpmDigestFinal).
 * The scan runs from the most recently attached digest; the first match
 * is finalized and its slot cleared.  When no matching digest is
 * attached, *datap/*lenp are zeroed instead.
 */
/*@unused@*/ static inline
void fdFiniDigest(FD_t fd, pgpHashAlgo hashalgo,
		/*@null@*/ /*@out@*/ void * datap,
		/*@null@*/ /*@out@*/ size_t * lenp,
		int asAscii)
	/*@globals internalState @*/
	/*@modifies fd, *datap, *lenp, internalState @*/
{
    int i = -1;		/* stays -1 when no matching digest was found */

    if (fd->ndigests > 0) {
	fdstat_enter(fd, FDSTAT_DIGEST);
	for (i = fd->ndigests - 1; i >= 0; i--) {
	    DIGEST_CTX ctx = fd->digests[i];
	    if (ctx == NULL)
		continue;
	    if (rpmDigestAlgo(ctx) != hashalgo)
		continue;
	    /* clear the slot before finalizing: ctx is consumed here */
	    fd->digests[i] = NULL;
	    (void) rpmDigestFinal(ctx, datap, lenp, asAscii);
	    break;
	}
	fdstat_exit(fd, FDSTAT_DIGEST, 0);
    }
    if (i < 0) {
	if (datap != NULL) *(void **)datap = NULL;
	if (lenp != NULL) *lenp = 0;
    }
}
/** \ingroup rpmio
 * Move digest contexts from fd into dig without finalizing them:
 * an MD5 context goes to dig->md5ctx, any SHA-family or RIPEMD160
 * context to dig->sha1ctx.  The fd slot is cleared in each case, so
 * ownership transfers to dig.  Asserts the destination slot is empty.
 */
/*@-mustmod@*/
/*@unused@*/ static inline
void fdStealDigest(FD_t fd, pgpDig dig)
	/*@modifies fd, dig @*/
{
    int i;
/*@-type@*/ /* FIX: getters for pgpDig internals */
    if (fd->ndigests > 0)
    for (i = fd->ndigests - 1; i >= 0; i--) {
	DIGEST_CTX ctx = fd->digests[i];
	if (ctx != NULL)
	switch (rpmDigestAlgo(ctx)) {
	case PGPHASHALGO_MD5:
assert(dig->md5ctx == NULL);
/*@-assignexpose -onlytrans@*/
	    dig->md5ctx = ctx;
/*@=assignexpose =onlytrans@*/
	    fd->digests[i] = NULL;
	    /*@switchbreak@*/ break;
	case PGPHASHALGO_SHA1:
	case PGPHASHALGO_RIPEMD160:
	case PGPHASHALGO_SHA256:
	case PGPHASHALGO_SHA384:
	case PGPHASHALGO_SHA512:
assert(dig->sha1ctx == NULL);
/*@-assignexpose -onlytrans@*/
	    dig->sha1ctx = ctx;
/*@=assignexpose =onlytrans@*/
	    fd->digests[i] = NULL;
	    /*@switchbreak@*/ break;
	default:
	    /*@switchbreak@*/ break;
	}
    }
/*@=type@*/
}
/*@=mustmod@*/
/*@-shadow@*/
/** \ingroup rpmio
 * Return the file descriptor at the bottom of the cookie's I/O stack,
 * or -2 when cookie is NULL.
 */
/*@unused@*/ static inline
int fdFileno(/*@null@*/ void * cookie)
	/*@*/
{
    if (cookie == NULL)
	return -2;
    return c2f(cookie)->fps[0].fdno;
}
/*@=shadow@*/
#ifdef __cplusplus
}
#endif
#endif /* H_RPMIO_INTERNAL */
|
is.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
#define STACK_SIZE (8 * 1024 * 1024)
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
typedef int INT_TYPE;
/********************/
/* Some global info */
/********************/
INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[SIZE_OF_BUFFERS],
key_buff1[SIZE_OF_BUFFERS],
key_buff2[SIZE_OF_BUFFERS],
partial_verify_vals[TEST_ARRAY_SIZE];
#ifdef USE_BUCKETS
INT_TYPE bucket_size[NUM_BUCKETS],
bucket_ptrs[NUM_BUCKETS];
#endif
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[TEST_ARRAY_SIZE],
test_rank_array[TEST_ARRAY_SIZE],
S_test_index_array[TEST_ARRAY_SIZE] =
{48427,17148,23627,62548,4431},
S_test_rank_array[TEST_ARRAY_SIZE] =
{0,18,346,64917,65463},
W_test_index_array[TEST_ARRAY_SIZE] =
{357773,934767,875723,898999,404505},
W_test_rank_array[TEST_ARRAY_SIZE] =
{1249,11698,1039987,1043896,1048018},
A_test_index_array[TEST_ARRAY_SIZE] =
{2112377,662041,5336171,3642833,4250760},
A_test_rank_array[TEST_ARRAY_SIZE] =
{104,17523,123928,8288932,8388264},
B_test_index_array[TEST_ARRAY_SIZE] =
{41869,812306,5102857,18232239,26860214},
B_test_rank_array[TEST_ARRAY_SIZE] =
{33422937,10244,59149,33135281,99},
C_test_index_array[TEST_ARRAY_SIZE] =
{44172927,72999161,74326391,129606274,21736814},
C_test_rank_array[TEST_ARRAY_SIZE] =
{61147,882988,266290,133997595,133525895};
/***********************/
/* function prototypes */
/***********************/
double is_randlc( double *X, double *A );
void full_verify( void );
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/*
 * Portable linear congruential generator x_{k+1} = a*x_k (mod 2^46).
 * *X is the seed (updated in place), *A the multiplier; both must be odd
 * double-precision integers in (1, 2^46).  Returns 2^-46 * x_{k+1},
 * i.e. a value in (0, 1).
 *
 * Definition converted from the obsolescent K&R form to an ANSI
 * prototype matching the declaration at the top of this file.
 */
double is_randlc(double *X, double *A)
{
    static int    KS = 0;
    static double R23, R46, T23, T46;
    double T1, T2, T3, T4;
    double A1, A2;
    double X1, X2;
    double Z;
    int    i, j;

    /* One-time setup: build 2^-23, 2^-46, 2^23, 2^46 by repeated
       halving/doubling so the values are exact on every platform. */
    if (KS == 0)
    {
	R23 = 1.0;
	R46 = 1.0;
	T23 = 1.0;
	T46 = 1.0;

	for (i=1; i<=23; i++)
	{
	    R23 = 0.50 * R23;
	    T23 = 2.0 * T23;
	}
	for (i=1; i<=46; i++)
	{
	    R46 = 0.50 * R46;
	    T46 = 2.0 * T46;
	}
	KS = 1;
    }

    /* Break A into two parts such that A = 2^23 * A1 + A2. */
    T1 = R23 * *A;
    j  = T1;
    A1 = j;
    A2 = *A - T23 * A1;

    /* Break X into two parts such that X = 2^23 * X1 + X2, compute
       Z = A1 * X2 + A2 * X1  (mod 2^23), and then
       X = 2^23 * Z + A2 * X2  (mod 2^46). */
    T1 = R23 * *X;
    j  = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;
    j  = R23 * T1;
    T2 = j;
    Z  = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j  = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;

    return (R46 * *X);
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/*
 * Fill key_array with NUM_KEYS pseudorandom keys in [0, MAX_KEY).
 * Each key is the sum of four randlc draws scaled by MAX_KEY/4.
 * (Unused local `j` removed.)
 */
void create_seq( double seed, double a )
{
    double x;
    int    i, k;

    k = MAX_KEY/4;

    for (i=0; i<NUM_KEYS; i++)
    {
	x = is_randlc(&seed, &a);
	x += is_randlc(&seed, &a);
	x += is_randlc(&seed, &a);
	x += is_randlc(&seed, &a);

	key_array[i] = k*x;
    }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/*
 * Verify the final ranking: scatter each key to its ranked position
 * (counting-sort placement using key_buff_ptr_global), then count any
 * adjacent out-of-order pairs.  Increments passed_verification on
 * success.  (Unused locals `k`, `m`, `unique_keys` removed.)
 */
void full_verify()
{
    INT_TYPE    i, j;

/*  Now, finally, sort the keys:  */
    for( i=0; i<NUM_KEYS; i++ )
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];

/*  Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    for( i=1; i<NUM_KEYS; i++ )
        if( key_array[i-1] > key_array[i] )
            j++;

    if( j != 0 )
    {
        printf( "Full_verify: number of keys out of sort: %d\n",
                j );
    }
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/*
 * Rank all keys for one benchmark iteration.  Called inside an OpenMP
 * parallel region: EVERY thread executes this function.
 *
 * Phases:
 *   master : plant two iteration-dependent keys, snapshot the partial-
 *            verification keys, clear the shared count array key_buff1;
 *   all    : count key populations into a private histogram, prefix-sum
 *            it, then merge into key_buff1 under a critical section;
 *   master : partial verification against the class rank tables, and on
 *            the last iteration publish key_buff1 for full_verify().
 *
 * Unused leftovers of the disabled bucket variant were removed
 * (locals j, l, m, shift, key, min_key_val, max_key_val).
 */
void rank( int iteration )
{

    INT_TYPE    i, k;

    INT_TYPE    prv_buff1[MAX_KEY];   /* per-thread histogram / rank array */

#pragma omp master
  {
    key_array[iteration] = iteration;
    key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration;

/*  Determine where the partial verify test keys are, load into  */
/*  top of array bucket_size                                     */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        partial_verify_vals[i] = key_array[test_index_array[i]];

/*  Clear the work array */
    for( i=0; i<MAX_KEY; i++ )
        key_buff1[i] = 0;
  }
#pragma omp barrier

    for (i=0; i<MAX_KEY; i++)
	prv_buff1[i] = 0;

/*  Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
    for( i=0; i<NUM_KEYS; i++ ) {
        key_buff2[i] = key_array[i];

/*  In this section, the keys themselves are used as their
    own indexes to determine how many of each there are: their
    individual population  */

	prv_buff1[key_buff2[i]]++;  /* Now they have individual key   */
    }                               /* population                     */

    /* Prefix-sum: prv_buff1[i] becomes this thread's count of keys <= i */
    for( i=0; i<MAX_KEY-1; i++ )
        prv_buff1[i+1] += prv_buff1[i];

#pragma omp critical
    {
	for( i=0; i<MAX_KEY; i++ )
	    key_buff1[i] += prv_buff1[i];
    }

/*  To obtain ranks of each key, successively add the individual key
    population, not forgetting to add m, the total of lesser keys,
    to the first key population */

    /* All threads must finish merging before the master verifies ranks. */
#pragma omp barrier
#pragma omp master
  {
/* This is the partial verify test section */
/* Observe that test_rank_array vals are   */
/* shifted differently for different cases */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i];          /* test vals were put here */
        if( 0 <= k  &&  k <= NUM_KEYS-1 )
            switch( CLASS )
            {
            case 'S':
                if( i <= 2 )
		{
                    if( key_buff1[k-1] != test_rank_array[i]+iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
		}
                else
		{
                    if( key_buff1[k-1] != test_rank_array[i]-iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
		}
                break;
            case 'W':
                if( i < 2 )
		{
                    if( key_buff1[k-1] !=
			test_rank_array[i]+(iteration-2) )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
		}
                else
                {
                    if( key_buff1[k-1] != test_rank_array[i]-iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
                }
                break;
            case 'A':
                if( i <= 2 )
        	{
                    if( key_buff1[k-1] !=
			test_rank_array[i]+(iteration-1) )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
        	}
                else
                {
                    if( key_buff1[k-1] !=
			test_rank_array[i]-(iteration-1) )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
                }
                break;
            case 'B':
                if( i == 1 || i == 2 || i == 4 )
        	{
                    if( key_buff1[k-1] != test_rank_array[i]+iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
        	}
                else
                {
                    if( key_buff1[k-1] != test_rank_array[i]-iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
                }
                break;
            case 'C':
                if( i <= 2 )
        	{
                    if( key_buff1[k-1] != test_rank_array[i]+iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
        	}
                else
                {
                    if( key_buff1[k-1] != test_rank_array[i]-iteration )
                    {
                        printf( "Failed partial verification: "
				"iteration %d, test key %d\n",
				iteration, i );
                    }
		    else
                        passed_verification++;
                }
                break;
            }
    }

/*  Make copies of rank info for use by full_verify: these variables
    in rank are local; making them global slows down the code, probably
    since they cannot be made register by compiler                        */

    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff1;

  } /* end master */
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/*
 * Benchmark driver: initializes the class verification tables, runs one
 * untimed warm-up ranking pass, then MAX_ITERATIONS timed passes, and
 * finally verifies the sort.  cargv smuggles the requested thread count
 * as an integer.  Always returns 0; success is reported through printed
 * output and the passed_verification counter.
 *
 * Fixes: added the missing return statement (function is declared int),
 * print the unsigned thread count with %u instead of %d, removed unused
 * locals itemp/maxtime and the dead commented-out c_print_results block.
 */
static int realmain(void *cargv)
{
    unsigned num_threads = (unsigned)((long)cargv);
    int             i, iteration;
    int		    nthreads = 1;
    double          timecounter;

    /* NOTE(review): called unguarded, yet omp.h is only included under
       _OPENMP -- a non-OpenMP build presumably relies on a stub from
       npb-C.h; confirm. */
    omp_set_num_threads(num_threads);

/*  Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
        case 'S':
            test_index_array[i] = S_test_index_array[i];
            test_rank_array[i]  = S_test_rank_array[i];
            break;
        case 'A':
            test_index_array[i] = A_test_index_array[i];
            test_rank_array[i]  = A_test_rank_array[i];
            break;
        case 'W':
            test_index_array[i] = W_test_index_array[i];
            test_rank_array[i]  = W_test_rank_array[i];
            break;
        case 'B':
            test_index_array[i] = B_test_index_array[i];
            test_rank_array[i]  = B_test_rank_array[i];
            break;
        case 'C':
            test_index_array[i] = C_test_index_array[i];
            test_rank_array[i]  = C_test_rank_array[i];
            break;
        };

/*  Printout initial NPB info */
    printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	    " - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );

/*  Initialize timer  */
    timer_clear( 0 );

/*  Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00,                    /* Random number gen seed */
                1220703125.00 );                 /* Random number gen mult */

/*  Do one interation for free (i.e., untimed) to guarantee initialization of
    all data and code pages and respective tables */
#pragma omp parallel
    rank( 1 );

/*  Start verification counter */
    passed_verification = 0;

    if( CLASS != 'S' ) printf( "\n iteration\n" );

/*  Start timer  */
    timer_start( 0 );

/*  This is the main iteration */
#pragma omp parallel private(iteration)
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
        rank( iteration );
#if defined(_OPENMP)
#pragma omp master
	nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    }
    (void) nthreads;	/* recorded for reporting; currently unreported */

/*  End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );

/*  This tests that keys are in sequence: sorting of last ranked key seq
    occurs here, but is an untimed operation */
    full_verify();

/*  The final printout  */
    if( passed_verification != 5*MAX_ITERATIONS + 1 ) {
	passed_verification = 0;
    }

#ifdef BOMP
    backend_create_time(num_threads);
#endif
    /* %u: num_threads is unsigned (previously printed with %d) */
    printf("Computetime %u %f\n", num_threads, timecounter);
    printf("client done\n");

    return 0;	/* was missing: realmain is declared to return int */
}
#define STACK_SIZE (8 * 1024 * 1024)
/*
 * Entry point: expects exactly one argument, the number of threads.
 * Dispatches to realmain(), either on a BOMP backend thread with a
 * custom stack, or directly in-process.
 */
int main(int argc, char** argv)
{
    if (argc != 2) { /* Print usage */
        printf("Usage: %s <Number of threads>\n", argv[0]);
        exit(-1);
    }

#ifdef BOMP
    backend_span_domain(atoi(argv[1]), STACK_SIZE);
    bomp_custom_init();
    backend_thread_create_varstack(realmain, (void*)((uint64_t)atoi(argv[1])),
                                   STACK_SIZE);
    backend_thread_exit();
#else /* BOMP */
    realmain((void*)((long)atoi(argv[1])));
#endif /* BOMP */

    return 0;	/* explicit success (was an implicit fall-off) */
}
|
levmarq.h | #ifndef ARUCO_MM__LevMarq_H
#define ARUCO_MM__LevMarq_H
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <functional>
#include <iostream>
#include <cmath>
#include "ar_omp.h"
#include <ctime>
#include <cstring>
#include <vector>
#include <chrono>
#include <iomanip>
namespace aruco{
// Levenberg-Marquardt method for general problems Inspired in
//@MISC\{IMM2004-03215,
// author = "K. Madsen and H. B. Nielsen and O. Tingleff",
// title = "Methods for Non-Linear Least Squares Problems (2nd ed.)",
// year = "2004",
// pages = "60",
// publisher = "Informatics and Mathematical Modelling, Technical University of Denmark, {DTU}",
// address = "Richard Petersens Plads, Building 321, {DK-}2800 Kgs. Lyngby",
// url = "http://www.ltu.se/cms_fs/1.51590!/nonlinear_least_squares.pdf"
//}
/**
 * Levenberg-Marquardt minimizer for general least-squares problems
 * ||F(z)|| with F(z)=f(z)f(z)^t, over parameter vectors of scalar type T.
 *
 * NOTE(review): the dynamic exception specifications below
 * (throw(std::exception)) are deprecated since C++11 and ill-formed in
 * C++17 -- confirm the project's language standard before upgrading.
 */
template<typename T>
class   LevMarq{
public:
    typedef   Eigen::Matrix<T,Eigen::Dynamic,1> eVector;   // parameter / residual vector
    typedef  std::function<void(const eVector  &, eVector &)> F_z_x;   // evaluation callback f(z)=x
    typedef  std::function<void(const eVector  &, Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> &)> F_z_J;  // Jacobian callback
    LevMarq();
    /**
     * @brief Constructor with params
     * @param maxIters maximum number of iterations of the algorithm
     * @param minError to stop the algorithm before reaching the max iterations
     * @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
     * @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
     * @param der_epsilon increment to calculate the derivative of the evaluation function
     * step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
     */
    LevMarq(int maxIters,double minError,double min_step_error_diff=0,double tau=1 ,double der_epsilon=1e-3);

    /**
     * @brief setParams
     * @param maxIters maximum number of iterations of the algorithm
     * @param minError to stop the algorithm before reaching the max iterations
     * @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
     * @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
     * @param der_epsilon increment to calculate the derivative of the evaluation function
     * step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
     */
    void setParams(int maxIters,double minError,double min_step_error_diff=0,double tau=1 ,double der_epsilon=1e-3);

    /**
     * @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t
     * @param z function params 1xP to be estimated. input-output. Contains the result of the optimization
     * @param f_z_x evaluation function  f(z)=x
     *          first parameter : z :  input. Data is in double precision as a row vector (1xp)
     *          second parameter : x :  output. Data must be returned in double
     * @param f_J computes the jacobian of f(z)
     *          first parameter : z : input. Data is in double precision as a row vector (1xp)
     *          second parameter : J : output. Data must be returned in double
     * @return final error
     */
    double solve( eVector  &z, F_z_x , F_z_J)throw (std::exception);
    /// Step by step solve mode
    /**
     * @brief init initializes the search engine
     * @param z
     */
    void init(eVector  &z, F_z_x )throw (std::exception);
    /**
     * @brief step gives a step of the search
     * @param f_z_x error evaluation function
     * @param f_z_J Jacobian function
     * @return error of current solution
     */
    bool step( F_z_x f_z_x , F_z_J f_z_J)throw (std::exception);
    bool step( F_z_x f_z_x)throw (std::exception);
    /**
     * @brief getCurrentSolution returns the current solution
     * @param z output
     * @return error of the solution
     */
    double getCurrentSolution(eVector &z)throw (std::exception);
    /**
     * @brief getBestSolution sets in z the best solution up to this moment
     * @param z output
     * @return error of the solution
     */
    double getBestSolution(eVector &z)throw (std::exception);

    /**  Automatic jacobian estimation
     * @brief solve non linear minimization problem ||F(z)||, where F(z)=f(z) f(z)^t
     * @param z function params 1xP to be estimated. input-output. Contains the result of the optimization
     * @param f_z_x evaluation function  f(z)=x
     *          first parameter : z :  input. Data is in double precision as a row vector (1xp)
     *          second parameter : x :  output. Data must be returned in double
     * @return final error
     */
    double solve( eVector  &z, F_z_x )throw (std::exception);

    //to enable verbose mode
    bool & verbose(){return _verbose;}

    //sets a callback func call at each step
    void setStepCallBackFunc(std::function<void(const eVector  &)> callback){_step_callback=callback;}
    //sets a function that indicates when the algorithm must be stop. returns true if must stop and false otherwise
    void setStopFunction( std::function<bool(const eVector &)> stop_function){_stopFunction=stop_function;}

    void  calcDerivates(const eVector & z , Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> &, F_z_x);
private:
    int _maxIters;                // iteration cap for solve()
    double _minErrorAllowed,_der_epsilon,_tau,_min_step_error_diff;
    bool _verbose;                // print per-step diagnostics when true
    //--------
    eVector curr_z,x64;           // current parameters and residual vector
    double currErr,prevErr,minErr ;   // current / previous / best-seen error
    Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> I,J;  // identity and Jacobian
    double mu,v;                  // LM damping factor (mu<0 = uninitialized) and growth rate
    std::function<void(const eVector  &)> _step_callback;
    std::function<bool(const eVector &)> _stopFunction;
};
// Default-construct the solver with the standard search parameters.
template<typename T>
LevMarq<T>::LevMarq() {
    _maxIters = 1000;
    _minErrorAllowed = 0;
    _der_epsilon = 1e-3;
    _verbose = false;
    _tau = 1;
    v = 5;
    _min_step_error_diff = 0;
}
/**
 * Construct with explicit search parameters.
 * @param maxIters maximum number of iterations of the algorithm
 * @param minError stop the algorithm before reaching the max iterations
 * @param min_step_error_diff minimum error difference between two iterations; below this level, stop
 * @param tau how far the initial solution is estimated to be from the real one (1 = very far,
 *        near 0 = close); auto-adjusted in subsequent iterations
 * @param der_epsilon increment used to estimate derivatives of the evaluation function
 */
template<typename T>
LevMarq<T>::LevMarq(int maxIters, double minError, double min_step_error_diff, double tau, double der_epsilon) {
    _maxIters = maxIters;
    _minErrorAllowed = minError;
    _der_epsilon = der_epsilon;
    _verbose = false;
    _tau = tau;
    v = 5;
    _min_step_error_diff = min_step_error_diff;
}
/**
* @brief setParams
* @param maxIters maximum number of iterations of the algoritm
* @param minError to stop the algorithm before reaching the max iterations
* @param min_step_error_diff minimum error difference between two iterations. If below this level, then stop.
* @param tau parameter indicating how near the initial solution is estimated to be to the real one. If 1, it means that it is very far and the first
* @param der_epsilon increment to calculate the derivate of the evaluation function
* step will be very short. If near 0, means the opposite. This value is auto calculated in the subsequent iterations.
*/
template<typename T>
void LevMarq<T>::setParams(int maxIters,double minError,double min_step_error_diff,double tau ,double der_epsilon){
    // Store the solver configuration.  The verbosity flag and the damping
    // growth factor v are intentionally left untouched here.
    _maxIters            = maxIters;
    _minErrorAllowed     = minError;
    _tau                 = tau;
    _der_epsilon         = der_epsilon;
    _min_step_error_diff = min_step_error_diff;
}
template<typename T>
void LevMarq<T>::calcDerivates(const eVector &z, Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &J, F_z_x f_z_x)
{
    // Numerical Jacobian by central finite differences: column i of J is
    // (f(z + eps*e_i) - f(z - eps*e_i)) / (2*eps), columns computed in parallel.
#pragma omp parallel for
    for (int i = 0; i < z.rows(); i++) {
        eVector z_plus(z), z_minus(z);
        z_plus(i)  += _der_epsilon;
        z_minus(i) -= _der_epsilon;
        eVector x_plus, x_minus;
        f_z_x(z_plus, x_plus);
        f_z_x(z_minus, x_minus);
        J.middleCols(i, 1) = (x_plus - x_minus) / (2.f * _der_epsilon);
    }
}
template<typename T>
double LevMarq<T>::solve( eVector &z, F_z_x f_z_x)throw (std::exception){
    // Forward to the full solver, supplying the numeric-differentiation
    // Jacobian of f_z_x as the Jacobian functor.
    auto numericJacobian = [this, f_z_x](const eVector &zz, Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &JJ) {
        this->calcDerivates(zz, JJ, f_z_x);
    };
    return solve(z, f_z_x, numericJacobian);
}
template<typename T>
bool LevMarq<T>::step( F_z_x f_z_x)throw (std::exception){
    // Forward to the full step(), supplying the numeric-differentiation
    // Jacobian of f_z_x as the Jacobian functor.
    auto numericJacobian = [this, f_z_x](const eVector &zz, Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> &JJ) {
        this->calcDerivates(zz, JJ, f_z_x);
    };
    return step(f_z_x, numericJacobian);
}
template<typename T>
void LevMarq<T>::init(eVector &z, F_z_x f_z_x )throw (std::exception){
    // Seed the solver state from the initial estimate z.
    curr_z = z;
    // Identity of the parameter dimension (used by the damping machinery).
    I.resize(z.rows(), z.rows());
    I.setIdentity();
    // Evaluate the residual at the starting point; all three error trackers
    // begin at the initial sum of squared residuals.
    f_z_x(curr_z, x64);
    minErr = currErr = prevErr = x64.cwiseProduct(x64).sum();
    J.resize(x64.rows(), z.rows());
    // A negative mu tells step() to bootstrap the damping factor.
    mu = -1;
}
#define splm_get_time(a,b) std::chrono::duration_cast<std::chrono::duration<double>>(a-b).count()
// Perform one Levenberg-Marquardt iteration from the current state
// (curr_z, x64, prevErr, mu).  f_z_x evaluates the residual vector and
// f_J computes the Jacobian.  Up to 6 damping values are tried; returns
// true if one of them produced a step that reduced the error (the step is
// then applied to curr_z).
template<typename T>
bool LevMarq<T>::step( F_z_x f_z_x, F_z_J f_J)throw (std::exception){
    f_J(curr_z,J);
    Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> Jt=J.transpose();
    Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> JtJ=(Jt*J);
    // Right-hand side of the normal equations: B = -J^T r
    eVector B=-Jt*x64;
    if(mu<0){//first time only: bootstrap mu from the largest diagonal of JtJ, scaled by tau
        int max=0;
        for(int j=1;j<JtJ.cols();j++) if (JtJ(j,j)>JtJ(max,max)) max=j;
        mu=JtJ(max,max)*_tau;
    }
    double gain=0,prev_mu=0;
    int ntries=0;
    bool isStepAccepted=false;
    do{
        //add/update damping factor to JtJ.
        //very efficient in any case, but particularly if initial damping does not produce improvement and must reenter
        for(int j=0;j<JtJ.cols();j++) JtJ(j,j) += mu-prev_mu;//update mu on the diagonal only
        prev_mu=mu;
        // Solve (JtJ + mu*I) delta = B via LDLT factorization
        eVector delta= JtJ.ldlt().solve(B);
        eVector estimated_z=curr_z+delta;
        //compute error at the tentative point
        f_z_x(estimated_z,x64);
        auto err=x64.cwiseProduct(x64).sum();
        // Predicted reduction of the linear model; gain ratio = actual/predicted
        auto L=0.5*delta.transpose()*((mu*delta) - B);
        gain= (err-prevErr)/ L(0,0) ;
        //get gain
        if (gain>0){
            // Good step: accept it and shrink mu (Nielsen's update rule)
            mu=mu*std::max(double(0.33),1.-pow(2*gain-1,3));
            v=5.f;
            currErr=err;
            curr_z=estimated_z;
            isStepAccepted=true;
        }
        else{ mu=mu*v; v=v*5;}  // bad step: grow mu geometrically and retry
    }while(gain<=0 && ntries++<5);
    if (_verbose) std::cout<<std::setprecision(5) <<"Curr Error="<<currErr<<" AErr(prev-curr)="<<prevErr-currErr<<" gain="<<gain<<" dumping factor="<<mu<<std::endl;
    // //check if we must move to the new position or exit
    // NOTE(review): after an accepted step this swap leaves the NEW (smaller)
    // error in prevErr and the older (larger) one in currErr; solve() appears
    // to rely on this ordering for its exit tests — confirm before changing.
    if ( currErr<prevErr)
        std::swap ( currErr,prevErr );
    return isStepAccepted;
}
template<typename T>
double LevMarq<T>::getCurrentSolution(eVector &z)throw (std::exception){
    // Copy out the current parameter vector and report its stored error.
    z = curr_z;
    return currErr;
}
// Full Levenberg-Marquardt loop.  Starts from z, repeatedly calls step()
// until a stop criterion fires, writes the final solution back into z and
// returns its error.  If a user _stopFunction is installed it is the sole
// stopping criterion; otherwise the iteration/error limits below apply.
template<typename T>
double LevMarq<T>::solve( eVector &z, F_z_x f_z_x, F_z_J f_J)throw (std::exception){
    init(z,f_z_x);
    if( _stopFunction){
        do{
            step(f_z_x,f_J);
            if (_step_callback) _step_callback(curr_z);
        }while(!_stopFunction(curr_z));
    }
    else{
        //intial error estimation
        int mustExit=0;
        for ( int i = 0; i < _maxIters && !mustExit; i++ ) {
            if (_verbose)std::cerr<<"iteration "<<i<<"/"<<_maxIters<< " ";
            bool isStepAccepted=step(f_z_x,f_J);
            //check if we must exit
            if ( currErr<_minErrorAllowed ) mustExit=1;
            if( fabs( prevErr -currErr)<=_min_step_error_diff || !isStepAccepted) mustExit=2;
            //exit if error increment
            // NOTE(review): the comment says "exit if error increment" but the
            // test is currErr<prevErr.  Given that step() swaps the two values
            // after an accepted step, this may be intentional — verify against
            // upstream before "fixing" the apparent inversion.
            if (currErr<prevErr )mustExit=3;
            // if ( (prevErr-currErr) < 1e-5 ) mustExit=true;
            if (_step_callback) _step_callback(curr_z);
        }
        // std::cout<<"Exit code="<<mustExit<<std::endl;
    }
    z=curr_z;
    return currErr;
}
}
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Following the classic glibc example, *y is used as scratch space and is
 * modified in place to perform the carry.
 * Returns 1 if the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Normalize in the other direction when the usec gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* Subtract; tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
// Driver for the PLUTO/CLooG time-tiled order-1 3D 7-point stencil with
// variable coefficients.  Command line: exe Nx Ny Nz Nt (interior grid
// sizes and number of time steps).  Runs the kernel TESTS times and
// reports per-run and minimum wall-clock times.
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;
    // NOTE(review): Nx/Ny/Nz are only assigned when argc > 3 and Nt only
    // when argc > 4; with fewer arguments they are read uninitialized
    // below (undefined behavior) — confirm the intended usage contract.
    if (argc > 3) {
        // +2 adds one boundary/ghost layer on each side
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    // allocate the arrays
    // A[2][Nz][Ny][Nx]: double-buffered grid, one buffer per time parity
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // coef[7][Nz][Ny][Nx]: one coefficient field per stencil point
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 8;
    tile_size[3] = 64;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables
    //
    // fixed seed so every run works on identical pseudo-random data
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    // run the kernel TESTS times, keeping the fastest wall-clock time
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.
           The GNU C Library is free software; you can redistribute it and/or
           modify it under the terms of the GNU Lesser General Public
           License as published by the Free Software Foundation; either
           version 2.1 of the License, or (at your option) any later version.
           The GNU C Library is distributed in the hope that it will be useful,
           but WITHOUT ANY WARRANTY; without even the implied warranty of
           MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
           Lesser General Public License for more details.
           You should have received a copy of the GNU Lesser General Public
           License along with the GNU C Library; if not, see
           <http://www.gnu.org/licenses/>.  */
        /* This header is separate from features.h so that the compiler can
           include it implicitly at the start of every compilation.  It must
           not itself include <features.h> or any other header that includes
           <features.h> because the implicit include comes before any feature
           test macros that may be defined in a source file before it first
           explicitly includes a system header.  GCC knows the name of this
           header in order to preinclude it.  */
        /* glibc's intent is to support the IEC 559 math functionality, real
           and complex.  If the GCC (4.9 and later) predefined macros
           specifying compiler intent are available, use them to determine
           whether the overall intent is to support these features; otherwise,
           presume an older compiler has intent to support these features and
           define these macros by default.  */
        /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
           Unicode 6.0.  */
        /* We do not support C11 <threads.h>.  */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        // Auto-generated time-tiled loop nest: t1..t4 index tiles, t5 is the
        // time step, and (t6,t7,t8) sweep z/y/x points inside each tile.
        // The t2 (tile) loop is the parallel dimension.
        if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
            for (t1=-1;t1<=floord(Nt-2,2);t1++) {
                lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
                ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(0,ceild(t1-3,4)),ceild(4*t2-Nz-4,8));t3<=min(min(min(floord(4*t2+Ny,8),floord(Nt+Ny-4,8)),floord(2*t1+Ny+1,8)),floord(4*t1-4*t2+Nz+Ny-1,8));t3++) {
                        for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(8*t3+Nx+4,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
                            for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),8*t3+6),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
                                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                                    for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                                        lbv=max(64*t4,t5+1);
                                        ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            // 7-point update: center plus the six face neighbors,
                                            // each weighted by its own coefficient field.
                                            A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = min(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    // NOTE(review): the top-level pointers A, coef and tile_size are never
    // freed; harmless at process exit, but technically leaked.
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    return 0;
}
|
GB_unaryop__abs_uint32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_int64
// op(A') function: GB_tran__abs_uint32_int64
// C type: uint32_t
// A type: int64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint32_t) Ax [p] for all p, via the GB_CAST_OP macro defined
// above (per the generated GB_OP, the operator itself is the identity once
// the value has been cast to uint32_t).
GrB_Info GB_unop__abs_uint32_int64
(
    uint32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Declare the loop index ahead of the pragma so the parallel loop is
    // accepted by OpenMP 2.0 compilers, matching the sibling generated
    // kernels (e.g. GB_unop__minv_uint16_int32).
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A while casting its int64_t entries to
// uint32_t.  The actual work is performed by the shared template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__abs_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__minv_uint16_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_int32
// op(A') function: GB_tran__minv_uint16_int32
// C type: uint16_t
// A type: int32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_UNSIGNED ((uint16_t) Ax [p], 16) for all p: each
// int32_t entry is cast to uint16_t and the MINV operator (as implemented
// by the GB_IMINV_UNSIGNED macro) is applied, via GB_CAST_OP above.
GrB_Info GB_unop__minv_uint16_int32
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared before the pragma for OpenMP 2.0 compatibility
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, casting int32_t entries to uint16_t and
// applying MINV.  The work is done by the shared template
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__minv_uint16_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__bxor_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint16)
// A*D function (colscale): GB (_AxD__bxor_uint16)
// D*A function (rowscale): GB (_DxB__bxor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint16)
// C=scalar+B GB (_bind1st__bxor_uint16)
// C=scalar+B' GB (_bind1st_tran__bxor_uint16)
// C=A+scalar GB (_bind2nd__bxor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bxor_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_UINT16 || GxB_NO_BXOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; each cij = aij ^ bij is
// computed by the shared template using the GB_BINOP macro defined above.
void GB (_Cdense_ewise3_noaccum__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate B into C with the
// BXOR operator, via the shared template GB_dense_subassign_23_template.c.
// B_ek_slicing describes how B's entries are partitioned over tasks.
GrB_Info GB (_Cdense_accumB__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar: accumulate the scalar into
// every entry of C with the BXOR operator, via the shared template
// GB_dense_subassign_22_template.c.
GrB_Info GB (_Cdense_accumb__bxor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (the code generator's duplicate, unreachable return has been removed)
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using the BXOR
// operator; the loop lives in GB_AxB_colscale_template.c, which writes
// through the typed alias Cx declared below.
GrB_Info GB (_AxD__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using the BXOR
// operator; the loop lives in GB_AxB_rowscale_template.c, which writes
// through the typed alias Cx declared below.
GrB_Info GB (_DxB__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B (optionally masked by M) with
// cij = aij ^ bij.  For eWiseUnion the alpha/beta scalars substitute for
// entries missing from A or B; they are only read when is_eWiseUnion is
// true.  The numerical work is done by GB_add_template.c, and the GB_WERK
// slicings declared here are released by GB_FREE_WORKSPACE.
GrB_Info GB (_AaddB__bxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse; cij = aij ^ bij on the intersection pattern.
// All work is done by the shared GB_emult_08_meta.c template.
GrB_Info GB (_AemultB_08__bxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  The GB_BINOP_FLIP branch only matters for non-commutative
// operators without a flipped variant; BXOR is commutative, so the
// GB_FLIPPED 0 path below is the one compiled here.
GrB_Info GB (_AemultB_02__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where the mask M is sparse/hypersparse
// and both A and B are bitmap/full.  All work is done by the shared
// GB_emult_04_template.c.
GrB_Info GB (_AemultB_04__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is held in
// bitmap form.  All work is done by the shared GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bxor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x ^ Bx [p] for every entry present in B's bitmap Bb, binding the
// scalar x as the first operand of the BXOR operator.
GrB_Info GB (_bind1st__bxor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        {
            uint16_t bk = GBX (Bx, k, false) ;
            Cx [k] = (x) ^ (bk) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] ^ y for every entry present in A's bitmap Ab, binding the
// scalar y as the second operand of the BXOR operator.
GrB_Info GB (_bind2nd__bxor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        {
            uint16_t ak = GBX (Ax, k, false) ;
            Cx [k] = (ak) ^ (y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = x ^ aij, with the
// scalar bound as the first operand (no typecasting despite the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) ^ (aij) ; \
}

// C = op (x, A'): transpose A and apply BXOR with the bound scalar x,
// via the shared GB_unop_transpose.c template.
GrB_Info GB (_bind1st_tran__bxor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this generated file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = aij ^ y, with the
// scalar bound as the second operand (no typecasting despite the macro name).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) ^ (y) ; \
}

// C = op (A', y): transpose A and apply BXOR with the bound scalar y,
// via the shared GB_unop_transpose.c template.
GrB_Info GB (_bind2nd_tran__bxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__islt_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__islt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int64)
// A*D function (colscale): GB (_AxD__islt_int64)
// D*A function (rowscale): GB (_DxB__islt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int64)
// C=scalar+B GB (_bind1st__islt_int64)
// C=scalar+B' GB (_bind1st_tran__islt_int64)
// C=A+scalar GB (_bind2nd__islt_int64)
// C=A'+scalar GB (_bind2nd_tran__islt_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT64 || GxB_NO_ISLT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: ISLT is not one of the operators that supports the dense
// C += A+B (ewise3 accum) kernel, so no function is generated here.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// The loop lives in the included template, specialized by the GB_* macros
// defined above for the ISLT_INT64 operator.
void GB (_Cdense_ewise3_noaccum__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B_ek_slicing partitions the entries of B across B_ntasks tasks.
GrB_Info GB (_Cdense_accumB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel compiled out when this operator/type combination is disabled
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense matrix C.
// p_bwork points to the scalar, passed as an untyped GB_void*.
GrB_Info GB (_Cdense_accumb__islt_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out when this operator/type combination is disabled
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single return on the enabled path; an unreachable duplicate
// "return (GrB_SUCCESS) ;" after the inner block has been removed
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx: numeric values of the result; pattern handling is in the template
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx: numeric values of the result; pattern handling is in the template
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the ISLT operator
// applied where both entries are present.  When is_eWiseUnion is true,
// alpha/beta scalars substitute for missing entries of A/B respectively.
GrB_Info GB (_AaddB__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// NOTE(review): alpha_scalar/beta_scalar are only initialized when
// is_eWiseUnion is true; presumably the template only reads them in
// that case -- confirm against GB_add_template.c.
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__islt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  flipxy requests z = f(y,x) instead of z = f(x,y).
GrB_Info GB (_AemultB_02__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (GB_BINOP_FLIP is 0 for ISLT, so only this branch is compiled.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both
// A and B are bitmap/full.
GrB_Info GB (_AemultB_04__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__islt_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x < Bx [p]) for every entry present in B: the scalar x is
// bound as the first operand of the ISLT operator.
GrB_Info GB (_bind1st__islt_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb, // presumably B's bitmap (NULL if none) -- see GBB
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present according to the bitmap
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] < y) for every entry present in A: the scalar y is
// bound as the second operand of the ISLT operator.
GrB_Info GB (_bind2nd__islt_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab, // presumably A's bitmap (NULL if none) -- see GBB
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present according to the bitmap
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply ISLT with the scalar bound first.
GrB_Info GB (_bind1st_tran__islt_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply ISLT with the scalar bound second.
GrB_Info GB (_bind2nd_tran__islt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_threadprivate.c | // RUN: %libomp-compile-and-run
// REQUIRES: !(abt && (clang || gcc))
/*
* Threadprivate is tested in 2 ways:
* 1. The global variable declared as threadprivate should have
* local copy for each thread. Otherwise race condition and
* wrong result.
* 2. If the value of local copy is retained for the two adjacent
* parallel regions
*/
#include "omp_testsuite.h"
#include <stdlib.h>
#include <stdio.h>
static int sum0=0;
static int myvalue = 0;
#pragma omp threadprivate(sum0)
#pragma omp threadprivate(myvalue)
/* Verifies threadprivate semantics in two ways:
 * 1. sum0 must have a per-thread copy, otherwise the reduction races
 *    and produces a wrong sum.
 * 2. myvalue must retain its per-thread value between two adjacent
 *    parallel regions.
 * Returns nonzero on success, 0 on failure. */
int test_omp_threadprivate()
{
int sum = 0;
int known_sum;
int i;
int iter;
int *data = NULL; /* NULL so a failed allocation is detectable below */
int size;
int num_failed = 0;
int my_random;
omp_set_dynamic(0);
#pragma omp parallel private(i)
{
sum0 = 0;
#pragma omp for
for (i = 1; i <= LOOPCOUNT; i++) {
sum0 = sum0 + i;
} /*end of for*/
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical */
} /* end of parallel */
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
if (known_sum != sum ) {
fprintf (stderr, " known_sum = %d, sum = %d\n", known_sum, sum);
}
/* the next parallel region is just used to get the number of threads*/
omp_set_dynamic(0);
#pragma omp parallel
{
#pragma omp master
{
size=omp_get_num_threads();
data=(int*) malloc(size*sizeof(int));
}
} /* end parallel; the implicit barrier publishes size and data */
if (data == NULL) {
/* original code dereferenced data without checking the allocation */
fprintf (stderr, " malloc failed\n");
return 0;
}
srand(45);
for (iter = 0; iter < 100; iter++) {
my_random = rand(); /* random number generator is
called inside serial region*/
/* the first parallel region is used to initialize myvalue
and the array with my_random+rank */
#pragma omp parallel
{
int rank;
rank = omp_get_thread_num ();
myvalue = data[rank] = my_random + rank;
}
/* the second parallel region verifies that the
value of "myvalue" is retained */
#pragma omp parallel reduction(+:num_failed)
{
int rank;
rank = omp_get_thread_num ();
num_failed = num_failed + (myvalue != data[rank]);
if(myvalue != data[rank]) {
fprintf (stderr, " myvalue = %d, data[rank]= %d\n",
myvalue, data[rank]);
}
}
}
free (data);
return (known_sum == sum) && !num_failed;
} /* end of check_threadprivate*/
/* Driver: run the threadprivate check REPETITIONS times and report the
 * number of failing repetitions as the process exit code (0 == success). */
int main()
{
int failures = 0;
for (int rep = 0; rep < REPETITIONS; rep++) {
if (!test_omp_threadprivate()) {
failures++;
}
}
return failures;
}
|
residualbased_incrementalupdate_static_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H )
#define KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H
/* System includes */
/* External includes */
/* Project includes */
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedIncrementalUpdateStaticScheme
* @ingroup KratosCore
* @brief This class provides the implementation of a static scheme
* @details The only operation done in this scheme is the update of the database, no predict is done
* @tparam TSparseSpace The sparse space considered
* @tparam TDenseSpace The dense space considered
* @see Scheme
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class ResidualBasedIncrementalUpdateStaticScheme
: public Scheme<TSparseSpace,TDenseSpace>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedIncrementalUpdateStaticScheme
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedIncrementalUpdateStaticScheme);
/// Base class definition
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
/// DoF array type definition
typedef typename BaseType::DofsArrayType DofsArrayType;
/// Data type definition
typedef typename BaseType::TDataType TDataType;
/// Matrix type definition
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// Vector type definition
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// Local system vector type definition
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
/// Local system matrix type definition
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
/// Elements containers definition
typedef ModelPart::ElementsContainerType ElementsArrayType;
/// Conditions containers definition
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
/// The definition of the vector containing the equation ids
typedef Element::EquationIdVectorType EquationIdVectorType;
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The pseudo static scheme (parameters)
* @param ThisParameters Dummy parameters (only the scheme name is validated)
*/
explicit ResidualBasedIncrementalUpdateStaticScheme(Parameters ThisParameters)
: BaseType()
{
// Validate default parameters
Parameters default_parameters = Parameters(R"(
{
"name" : "ResidualBasedIncrementalUpdateStaticScheme"
})" );
ThisParameters.ValidateAndAssignDefaults(default_parameters);
}
/** Default constructor.
*/
explicit ResidualBasedIncrementalUpdateStaticScheme()
: BaseType()
{}
/** Copy Constructor.
*/
explicit ResidualBasedIncrementalUpdateStaticScheme(ResidualBasedIncrementalUpdateStaticScheme& rOther)
:BaseType(rOther)
{
}
/** Destructor.
*/
~ResidualBasedIncrementalUpdateStaticScheme() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution.
* @details Delegates to the DoF updater: x += Dx for every free DoF.
* @param rModelPart The model part of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
mpDofUpdater->UpdateDofs(rDofSet, rDx);
KRATOS_CATCH("")
}
/**
* @brief Performing the prediction of the solution.
* @details Intentionally a no-op: a static scheme performs no prediction.
* @param rModelPart The model part of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
KRATOS_CATCH("")
}
/**
* @brief It initializes a non-linear iteration (for the element)
* @details Calls InitializeNonLinearIteration on every element, condition
* and master-slave constraint of the model part, in parallel.
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void InitializeNonLinIteration(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Definition of the first element iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) {
auto it_elem = it_elem_begin + i;
it_elem->InitializeNonLinearIteration(r_current_process_info);
}
// Definition of the first condition iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) {
auto it_cond = it_cond_begin + i;
it_cond->InitializeNonLinearIteration(r_current_process_info);
}
// Definition of the first constraint iterator
const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin();
#pragma omp parallel for
for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) {
auto it_const = it_const_begin + i;
it_const->InitializeNonLinearIteration(r_current_process_info);
}
KRATOS_CATCH( "" );
}
/**
* @brief It initializes a non-linear iteration (for an individual condition)
* @param rCurrentCondition The condition to compute
* @param rCurrentProcessInfo The current process info instance
*/
void InitializeNonLinearIteration(
Condition::Pointer rCurrentCondition,
ProcessInfo& rCurrentProcessInfo
) override
{
(rCurrentCondition)->InitializeNonLinearIteration(rCurrentProcessInfo);
}
/**
* @brief It initializes a non-linear iteration (for an individual element)
* @param pCurrentElement The element to compute
* @param rCurrentProcessInfo The current process info instance
*/
void InitializeNonLinearIteration(
Element::Pointer pCurrentElement,
ProcessInfo& rCurrentProcessInfo
) override
{
(pCurrentElement)->InitializeNonLinearIteration(rCurrentProcessInfo);
}
/**
* @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme.
* @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. This function calculates at the same time the contribution to the LHS and to the RHS of the system
* @param rCurrentElement The element to compute
* @param rLHSContribution The LHS matrix contribution
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Element& rCurrentElement,
LocalSystemMatrixType& rLHSContribution,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
rCurrentElement.CalculateLocalSystem(rLHSContribution,rRHSContribution, rCurrentProcessInfo);
rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief Functions totally analogous to the precedent but applied to the "condition" objects
* @param rCurrentCondition The condition to compute
* @param rLHSContribution The LHS matrix contribution
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateSystemContributions(
Condition& rCurrentCondition,
LocalSystemMatrixType& rLHSContribution,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
rCurrentCondition.CalculateLocalSystem(rLHSContribution, rRHSContribution, rCurrentProcessInfo);
rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief This function is designed to calculate just the RHS contribution
* @param rCurrentElement The element to compute
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Element& rCurrentElement,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
rCurrentElement.CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief Functions totally analogous to the precedent but applied to the "condition" objects
* @param rCurrentCondition The condition to compute
* @param rRHSContribution The RHS vector contribution
* @param rEquationId The ID's of the condition degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateRHSContribution(
Condition& rCurrentCondition,
LocalSystemVectorType& rRHSContribution,
EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
rCurrentCondition.CalculateRightHandSide(rRHSContribution, rCurrentProcessInfo);
rCurrentCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief This function is designed to calculate just the LHS contribution
* @param rCurrentElement The element to compute
* @param rLHSContribution The LHS matrix contribution
* @param rEquationId The ID's of the element degrees of freedom
* @param rCurrentProcessInfo The current process info instance
*/
void CalculateLHSContribution(
Element& rCurrentElement,
LocalSystemMatrixType& rLHSContribution,
EquationIdVectorType& rEquationId,
const ProcessInfo& rCurrentProcessInfo
) override
{
KRATOS_TRY
rCurrentElement.CalculateLeftHandSide(rLHSContribution, rCurrentProcessInfo);
rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
KRATOS_CATCH("")
}
/**
* @brief Liberate internal storage.
*/
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedIncrementalUpdateStaticScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); /// The DoF updater, which will update the values
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class ResidualBasedIncrementalUpdateStaticScheme
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_INCREMENTALUPDATE_STATIC_SCHEME_H defined */
|
/* Convert an interleaved image (>= 3 channels, R,G,B first) to a
 * single-channel grayscale image using the BT.709 luma weights.
 * img:    width*height pixels, `channels` bytes each
 * result: width*height bytes, one luma value per pixel
 * threads: unused here (parallelism is left to the OpenMP runtime,
 *          matching the original behavior).
 * The double sum is truncated on the implicit conversion to unsigned char. */
void convert_baseline(unsigned char *img, int width, int height, int channels, int threads, unsigned char *result)
{
#pragma omp parallel for collapse(2)
    for (int y = 0; y < height; y++)
    {
        /* y outer / x inner walks memory in row-major order, unlike the
         * original x-outer loop which strided by `width` on every access */
        for (int x = 0; x < width; x++)
        {
            const int p = y * width + x; /* pixel index, hoisted */
            result[p] =
                0.2126 * img[p * channels]     // red
                + 0.7152 * img[p * channels + 1] // green
                + 0.0722 * img[p * channels + 2]; // blue
        }
    }
}
ex4.c | #include <stdio.h>
#include <omp.h>
/* Example: double every element of an array with an OpenMP parallel for.
 * The original read a[i] uninitialized (undefined behavior); the array is
 * now initialized first.  Note: a[N] is a ~4 MB VLA on the stack, which
 * fits the common 8 MB default but may overflow smaller stacks. */
int main(int argc, char **argv)
{
const long N = 1000000;
int i, a[N];
// initialize before use: reading an uninitialized element is UB
#pragma omp parallel for
for (i = 0; i < N; i++)
a[i] = i;
// double every element in parallel
#pragma omp parallel for
for (i = 0; i < N; i++)
a[i] = 2 * a[i];
return 0;
}
|
MetaballComponent.h | #pragma once
#include "ECS.h"
#include "TransformComponent.h"
#include "MarchingCubeComponent.h"
#include "glm/gtx/string_cast.hpp"
#include <cstdint>
#include <functional>
// A metaball: contributes a radial field function to a shared marching-cube
// grid, centered at the owning entity's TransformComponent position.
class MetaballComponent : public Component
{
public:
// __marchingCubeComponent: shared grid this ball writes into (not owned)
// __radius: field radius; the isosurface radius scales with it
MetaballComponent(MarchingCubeComponent* __marchingCubeComponent, float __radius)
: Component{}
, _marchingCubeComponent { __marchingCubeComponent }
, _radius { __radius }
{}
void update([[maybe_unused]] double __deltaTime) override
{
// Testings (frame rate dependent)
// NOTE(review): t advances by a fixed step per call, so the animation
// speed depends on the frame rate (__deltaTime is ignored); entity IDs
// 13/14 appear hard-coded for a demo scene -- confirm before reuse.
t += 0.015f;
if (entity->getEntityID() == 13)
{
auto& transformComponent = entity->getComponent<TransformComponent>();
transformComponent.setPosition({0.0f, cos(t)*2.5f, sin(t)*2.5f});
}
else if (entity->getEntityID() == 14)
{
auto& transformComponent = entity->getComponent<TransformComponent>();
transformComponent.setPosition({0.0f, cos(t)*2.5f, -sin(t)*2.5f});
}
else
{
auto& transformComponent = entity->getComponent<TransformComponent>();
transformComponent.setPosition({0.0f, sin(-t)*2.5f, 0.0f});
}
// End Testings
ASSERT(entity->hasComponent<TransformComponent>(), "entity should have a TransformComponent");
auto pos = entity->getComponent<TransformComponent>().position();
auto& grid = _marchingCubeComponent->grid();
// Bind this ball's center and radius into a field function f(p)
std::function<float(glm::vec3)> func = std::bind(&MetaballComponent::f, pos, _radius, std::placeholders::_1);
_marchingCubeComponent->addFunc(func);
// Evaluate the field at every corner of every grid cell.
//#pragma omp parallel for
for (std::uint64_t x = 0; x < grid.size(); ++x)
{
for (std::uint64_t y = 0; y < grid[x].size(); ++y)
{
for (std::uint64_t z = 0; z < grid[x][y].size(); ++z)
{
// loop cell points
for (std::uint8_t i = 0; i < 8; ++i)
{
_marchingCubeComponent->changeGrid(x, y, z, i, func(grid[x][y][z].points[i]));
}
}
}
}
}
// Field value at __pos: radius^2 / squared distance to __center.
// NOTE(review): returns 0 when __pos coincides with the center, i.e. the
// field vanishes at the exact center rather than peaking -- presumably
// centers never land exactly on grid points; confirm.
static inline float f(glm::vec3 __center, float __radius, glm::vec3 __pos)
{
float d = glm::dot(__center - __pos, __center - __pos);
if (d == 0.0f) return 0.0f;
return std::pow(__radius, 2)/d;
}
private:
MarchingCubeComponent* _marchingCubeComponent; // shared grid, not owned
float _radius; // field radius
double t = 0.0f; // animation parameter, advanced per frame
};
|
GB_binop__times_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int64)
// A*D function (colscale): GB (_AxD__times_int64)
// D*A function (rowscale): GB (_DxB__times_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int64)
// C=scalar+B GB (_bind1st__times_int64)
// C=scalar+B' GB (_bind1st_tran__times_int64)
// C=A+scalar GB (_bind2nd__times_int64)
// C=A'+scalar GB (_bind2nd_tran__times_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense, with the TIMES operator.
void GB (_Cdense_ewise3_accum__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the TIMES op; all three matrices are dense, no accumulator.
// All the work is done by the included template, driven by the macros above.
void GB (_Cdense_ewise3_noaccum__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C.
// B_ek_slicing/B_ntasks/B_nthreads describe the parallel partition of B's
// entries (computed by the caller) and are consumed by the template.
GrB_Info GB (_Cdense_accumB__times_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// compiled out entirely if this operator/type pairing is disabled
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C with the TIMES
// accumulator.  p_bwork points at the untyped scalar, reinterpreted here
// as int64_t; the included template does the actual update.
GrB_Info GB (_Cdense_accumb__times_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// A_ek_slicing/A_ntasks/A_nthreads partition A's entries for the template.
GrB_Info GB (_AxD__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array, used by the template
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C's value array, used by the template
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B with the TIMES op.
// The C_to_* maps, TaskList, and task/thread counts are precomputed by the
// caller; GB_add_template.c performs the numerical work.
GrB_Info GB (_AaddB__times_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released via GB_FREE_WORKSPACE
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// eWiseUnion default scalars; only read when is_eWiseUnion is true
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse/hypersparse.  All work is done by GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__times_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#>=A.*B when A is sparse/hyper and B is bitmap/full.
// flipxy selects fmult(y,x) vs fmult(x,y); for TIMES, GB_BINOP_FLIP is 0 so
// only the non-flipped template instantiation below is compiled.
GrB_Info GB (_AemultB_02__times_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M>=A.*B where M is sparse/hyper and both A and B
// are bitmap/full.  M_ek_slicing partitions M's entries for the template.
GrB_Info GB (_AemultB_04__times_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__times_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p] for all entries present in B: apply the TIMES_INT64
// operator with the scalar x bound to its first argument.  Entries absent
// from the bitmap Bb are left untouched.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__times_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and the bound scalar
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Bx = (int64_t *) Bx_input ;
    const int64_t x = (*((int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // compute only the entries present in the bitmap
        if (GBB (Bb, k))
        {
            int64_t bkj = GBX (Bx, k, false) ;
            Cx [k] = (x * bkj) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y for all entries present in A: apply the TIMES_INT64
// operator with the scalar y bound to its second argument.  Entries absent
// from the bitmap Ab are left untouched.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__times_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped arrays and the bound scalar
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    const int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // compute only the entries present in the bitmap
        if (GBB (Ab, k))
        {
            int64_t akj = GBX (Ax, k, false) ;
            Cx [k] = (akj * y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply TIMES with x bound to the 1st arg.
GrB_Info GB (_bind1st_tran__times_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the scalar bound to the first operand of z = x*y
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code below; preprocessor directives take effect
// at compile time regardless of the unreachable position after the returns
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply TIMES with y bound to the 2nd arg.
GrB_Info GB (_bind2nd_tran__times_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the scalar bound to the second operand of z = x*y
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
viter.c | /*
© 2011-2016 by Kornel Lesiński.
This file is part of libimagequant.
libimagequant is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
libimagequant is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with libimagequant. If not, see <http://www.gnu.org/licenses/>.
*/
#include "libimagequant.h"
#include "pam.h"
#include "viter.h"
#include "nearest.h"
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#define omp_get_thread_num() 0
#endif
/*
* Voronoi iteration: new palette color is computed from weighted average of colors that map to that palette entry.
*/
/* Zeroes every per-thread accumulator slice before a Voronoi iteration. */
LIQ_PRIVATE void viter_init(const colormap *map, const unsigned int max_threads, viter_state average_color[])
{
    const size_t per_thread = VITER_CACHE_LINE_GAP + map->colors;
    memset(average_color, 0, sizeof(average_color[0]) * per_thread * max_threads);
}
/* Adds a weighted pixel to the accumulator of palette entry `match`.
   Each thread writes only into its own slice of average_color[]. */
LIQ_PRIVATE void viter_update_color(const f_pixel acolor, const float value, const colormap *map, unsigned int match, const unsigned int thread, viter_state average_color[])
{
    viter_state *const entry = &average_color[match + thread * (VITER_CACHE_LINE_GAP + map->colors)];
    entry->a += acolor.a * value;
    entry->r += acolor.r * value;
    entry->g += acolor.g * value;
    entry->b += acolor.b * value;
    entry->total += value;
}
/* Merges the per-thread accumulators and writes the new weighted-average
   color into each non-fixed palette entry that received any weight. */
LIQ_PRIVATE void viter_finalize(colormap *map, const unsigned int max_threads, const viter_state average_color[])
{
    const unsigned int stride = VITER_CACHE_LINE_GAP + map->colors;
    for (unsigned int i = 0; i < map->colors; i++) {
        double a = 0, r = 0, g = 0, b = 0, total = 0;
        // Aggregate results from all threads
        for (unsigned int t = 0; t < max_threads; t++) {
            const viter_state *const s = &average_color[stride * t + i];
            a += s->a;
            r += s->r;
            g += s->g;
            b += s->b;
            total += s->total;
        }
        if (total && !map->palette[i].fixed) {
            map->palette[i].acolor = (f_pixel){
                .a = a / total,
                .r = r / total,
                .g = g / total,
                .b = b / total,
            };
            map->palette[i].popularity = total;
        }
    }
}
/* One Voronoi (k-means) iteration: maps every histogram entry to its nearest
   palette color, accumulates weighted averages per thread, then moves each
   palette entry to the average of the pixels assigned to it.  Returns the
   average perceptual difference of this mapping. */
LIQ_PRIVATE double viter_do_iteration(histogram *hist, colormap *const map, viter_callback callback, const bool fast_palette)
{
    const unsigned int max_threads = omp_get_max_threads();
    viter_state *average_color = malloc((VITER_CACHE_LINE_GAP+map->colors) * max_threads * sizeof(viter_state));
    if (!average_color) {
        // Out of memory: bail out instead of writing through NULL below.
        // NOTE(review): 0 reads as "no remaining difference"; confirm that
        // callers treat this as a stop condition rather than success.
        return 0;
    }
    viter_init(map, max_threads, average_color);
    struct nearest_map *const n = nearest_init(map, fast_palette);
    hist_item *const achv = hist->achv;
    const int hist_size = hist->size;
    double total_diff = 0;

    #pragma omp parallel for if (hist_size > 3000) \
        schedule(static) default(none) shared(average_color,callback) reduction(+:total_diff)
    for (int j = 0; j < hist_size; j++) {
        float diff;
        const unsigned int match = nearest_search(n, &achv[j].acolor, achv[j].tmp.likely_colormap_index, &diff);
        achv[j].tmp.likely_colormap_index = match;  // warm-start the next search
        total_diff += diff * achv[j].perceptual_weight;
        viter_update_color(achv[j].acolor, achv[j].perceptual_weight, map, match, omp_get_thread_num(), average_color);
        if (callback) callback(&achv[j], diff);
    }

    nearest_free(n);
    viter_finalize(map, max_threads, average_color);
    free(average_color);

    return total_diff / hist->total_perceptual_weight;
}
|
relic_cp_rsa.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2020 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or modify it under the
* terms of the version 2.1 (or later) of the GNU Lesser General Public License
* as published by the Free Software Foundation; or version 2.0 of the Apache
* License as published by the Apache Software Foundation. See the LICENSE files
* for more details.
*
* RELIC is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE. See the LICENSE files for more details.
*
* You should have received a copy of the GNU Lesser General Public or the
* Apache License along with RELIC. If not, see <https://www.gnu.org/licenses/>
* or <https://www.apache.org/licenses/>.
*/
/**
* @file
*
* Implementation of the RSA cryptosystem.
*
* @ingroup cp
*/
#include <string.h>
#include "relic_core.h"
#include "relic_conf.h"
#include "relic_rand.h"
#include "relic_bn.h"
#include "relic_util.h"
#include "relic_cp.h"
#include "relic_md.h"
#include "relic_multi.h"
/*============================================================================*/
/* Private definitions */
/*============================================================================*/
/**
 * Length of chosen padding scheme (minimum padding overhead in bytes).
 */
#if CP_RSAPD == PKCS1
#define RSA_PAD_LEN (11)
#elif CP_RSAPD == PKCS2
#define RSA_PAD_LEN (2 * RLC_MD_LEN + 2)
#else
#define RSA_PAD_LEN (2)
#endif
/**
 * Identifier for encrypted messages (PKCS#1 v1.5 block type 02).
 */
#define RSA_PUB (02)
/**
 * Identifier for signed messages (PKCS#1 v1.5 block type 01).
 */
#define RSA_PRV (01)
/**
 * Byte used as padding unit.
 */
#define RSA_PAD (0xFF)
/**
 * Byte used as padding unit in PSS signatures (trailer byte).
 */
#define RSA_PSS (0xBC)
/**
 * Identifier for encryption.
 */
#define RSA_ENC 1
/**
 * Identifier for decryption.
 */
#define RSA_DEC 2
/**
 * Identifier for signature.
 */
#define RSA_SIG 3
/**
 * Identifier for verification.
 */
#define RSA_VER 4
/**
 * Identifier for second encryption step.
 */
#define RSA_ENC_FIN 5
/**
 * Identifier for second signing step.
 */
#define RSA_SIG_FIN 6
/**
 * Identifier for signature of a precomputed hash.
 */
#define RSA_SIG_HASH 7
/**
 * Identifier for verification of a precomputed hash.
 */
#define RSA_VER_HASH 8
#if CP_RSAPD == BASIC
/**
 * Applies or removes simple encryption padding (EB = 00 | FF | D).
 *
 * @param[out] m			- the buffer to pad.
 * @param[out] p_len		- the number of added pad bytes.
 * @param[in] m_len			- the message length in bytes.
 * @param[in] k_len			- the key length in bytes.
 * @param[in] operation		- flag to indicate the operation type.
 * @return RLC_ERR if errors occurred, RLC_OK otherwise.
 */
static int pad_basic(bn_t m, int *p_len, int m_len, int k_len, int operation) {
uint8_t pad = 0;
int result = RLC_OK;
bn_t t;
RLC_TRY {
bn_null(t);
bn_new(t);
switch (operation) {
case RSA_ENC:
case RSA_SIG:
case RSA_SIG_HASH:
/* EB = 00 | FF | D. */
bn_zero(m);
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PAD);
/* Make room for the real message. */
bn_lsh(m, m, m_len * 8);
break;
case RSA_DEC:
case RSA_VER:
case RSA_VER_HASH:
/* EB = 00 | FF | D. */
/* The leading byte of the k_len-byte block must be zero. */
m_len = k_len - 1;
bn_rsh(t, m, 8 * m_len);
if (!bn_is_zero(t)) {
result = RLC_ERR;
}
/* Scan past the run of zero bytes to find the 0xFF pad byte. */
*p_len = 1;
do {
(*p_len)++;
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
} while (pad == 0 && m_len > 0);
if (pad != RSA_PAD) {
result = RLC_ERR;
}
/* Keep only the message bytes below the padding. */
bn_mod_2b(m, m, (k_len - *p_len) * 8);
break;
}
}
RLC_CATCH_ANY {
result = RLC_ERR;
}
RLC_FINALLY {
bn_free(t);
}
return result;
}
#endif
#if CP_RSAPD == PKCS1
/**
 * ASN.1 identifier of the hash function SHA-224 (DigestInfo prefix that is
 * prepended to the raw digest in PKCS#1 v1.5 signatures).
 */
static const uint8_t sh224_id[] =
{ 0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c };
/**
 * ASN.1 identifier of the hash function SHA-256.
 */
static const uint8_t sh256_id[] =
{ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 };
/**
 * ASN.1 identifier of the hash function SHA-384.
 */
static const uint8_t sh384_id[] =
{ 0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30 };
/**
 * ASN.1 identifier of the hash function SHA-512.
 */
static const uint8_t sh512_id[] =
{ 0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40 };
/**
 * Looks up the ASN.1 DigestInfo prefix of a hash function, as required by
 * the PKCS#1 v1.5 padding standard.
 *
 * @param[in] md			- the hash function.
 * @param[in, out] len		- receives the length of the identifier in bytes.
 * @return the pointer to the identifier, or NULL for unsupported hashes
 * (in which case ERR_NO_VALID is thrown).
 */
static uint8_t *hash_id(int md, int *len) {
	const uint8_t *id = NULL;
	switch (md) {
		case SH224:
			id = sh224_id;
			*len = sizeof(sh224_id);
			break;
		case SH256:
			id = sh256_id;
			*len = sizeof(sh256_id);
			break;
		case SH384:
			id = sh384_id;
			*len = sizeof(sh384_id);
			break;
		case SH512:
			id = sh512_id;
			*len = sizeof(sh512_id);
			break;
		default:
			RLC_THROW(ERR_NO_VALID);
			return NULL;
	}
	return (uint8_t *)id;
}
/**
 * Applies or removes a PKCS#1 v1.5 encryption/signature padding.
 *
 * Encryption blocks have the form EB = 00 | 02 | PS | 00 | D with random
 * nonzero PS; signature blocks use EB = 00 | 01 | FF..FF | 00 | [ID] | D,
 * where ID is the hash's ASN.1 DigestInfo prefix (omitted for *_HASH ops).
 *
 * @param[out] m			- the buffer to pad.
 * @param[out] p_len		- the number of added pad bytes.
 * @param[in] m_len			- the message length in bytes.
 * @param[in] k_len			- the key length in bytes.
 * @param[in] operation		- flag to indicate the operation type.
 * @return RLC_ERR if errors occurred, RLC_OK otherwise.
 */
static int pad_pkcs1(bn_t m, int *p_len, int m_len, int k_len, int operation) {
uint8_t *id, pad = 0;
int len, result = RLC_OK;
bn_t t;
bn_null(t);
RLC_TRY {
bn_new(t);
switch (operation) {
case RSA_ENC:
/* EB = 00 | 02 | PS | 00 | D. */
bn_zero(m);
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PUB);
*p_len = k_len - 3 - m_len;
for (int i = 0; i < *p_len; i++) {
bn_lsh(m, m, 8);
/* PS bytes must be nonzero. */
do {
rand_bytes(&pad, 1);
} while (pad == 0);
bn_add_dig(m, m, pad);
}
bn_lsh(m, m, 8);
bn_add_dig(m, m, 0);
/* Make room for the real message. */
bn_lsh(m, m, m_len * 8);
break;
case RSA_DEC:
/* Leading byte must be 00, next byte must be 02. */
m_len = k_len - 1;
bn_rsh(t, m, 8 * m_len);
if (!bn_is_zero(t)) {
result = RLC_ERR;
}
*p_len = m_len;
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
if (pad != RSA_PUB) {
result = RLC_ERR;
}
/* Skip the nonzero PS bytes up to the 00 separator. */
do {
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
} while (pad != 0 && m_len > 0);
/* Remove padding and trailing zero. */
*p_len -= (m_len - 1);
bn_mod_2b(m, m, (k_len - *p_len) * 8);
break;
case RSA_SIG:
/* EB = 00 | 01 | PS | 00 | D. */
id = hash_id(MD_MAP, &len);
bn_zero(m);
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PRV);
*p_len = k_len - 3 - m_len - len;
for (int i = 0; i < *p_len; i++) {
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PAD);
}
bn_lsh(m, m, 8);
bn_add_dig(m, m, 0);
/* Append the ASN.1 DigestInfo prefix for the configured hash. */
bn_lsh(m, m, 8 * len);
bn_read_bin(t, id, len);
bn_add(m, m, t);
/* Make room for the real message. */
bn_lsh(m, m, m_len * 8);
break;
case RSA_SIG_HASH:
/* EB = 00 | 01 | PS | 00 | D. */
bn_zero(m);
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PRV);
*p_len = k_len - 3 - m_len;
for (int i = 0; i < *p_len; i++) {
bn_lsh(m, m, 8);
bn_add_dig(m, m, RSA_PAD);
}
bn_lsh(m, m, 8);
bn_add_dig(m, m, 0);
/* Make room for the real message. */
bn_lsh(m, m, m_len * 8);
break;
case RSA_VER:
m_len = k_len - 1;
bn_rsh(t, m, 8 * m_len);
if (!bn_is_zero(t)) {
result = RLC_ERR;
}
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
if (pad != RSA_PRV) {
result = RLC_ERR;
}
/* Skip the FF padding up to the 00 separator. */
do {
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
} while (pad != 0 && m_len > 0);
if (m_len == 0) {
result = RLC_ERR;
}
/* Remove padding and trailing zero. */
id = hash_id(MD_MAP, &len);
m_len -= len;
bn_rsh(t, m, m_len * 8);
/* Compare the embedded DigestInfo prefix byte-by-byte (low byte first),
 * accumulating differences in r to avoid early exit. */
int r = 0;
for (int i = 0; i < len; i++) {
pad = (uint8_t)t->dp[0];
r |= pad - id[len - i - 1];
bn_rsh(t, t, 8);
}
*p_len = k_len - m_len;
bn_mod_2b(m, m, m_len * 8);
result = (r == 0 ? RLC_OK : RLC_ERR);
break;
case RSA_VER_HASH:
m_len = k_len - 1;
bn_rsh(t, m, 8 * m_len);
if (!bn_is_zero(t)) {
result = RLC_ERR;
}
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
if (pad != RSA_PRV) {
result = RLC_ERR;
}
do {
m_len--;
bn_rsh(t, m, 8 * m_len);
pad = (uint8_t)t->dp[0];
} while (pad != 0 && m_len > 0);
if (m_len == 0) {
result = RLC_ERR;
}
/* Remove padding and trailing zero. */
*p_len = k_len - m_len;
bn_mod_2b(m, m, m_len * 8);
break;
}
}
RLC_CATCH_ANY {
result = RLC_ERR;
}
RLC_FINALLY {
bn_free(t);
}
return result;
}
#endif
#if CP_RSAPD == PKCS2
/**
 * Applies or removes a PKCS#1 v2.1 padding (OAEP for encryption, PSS for
 * signatures), including the second-step operations RSA_ENC_FIN/RSA_SIG_FIN.
 *
 * @param[out] m			- the buffer to pad.
 * @param[out] p_len		- the number of added pad bytes.
 * @param[in] m_len			- the message length in bytes (the modulus length
 *							  in bits for RSA_SIG_FIN/RSA_VER/RSA_VER_HASH).
 * @param[in] k_len			- the key length in bytes.
 * @param[in] operation		- flag to indicate the operation type.
 * @return RLC_ERR if errors occurred, RLC_OK otherwise.
 */
static int pad_pkcs2(bn_t m, int *p_len, int m_len, int k_len, int operation) {
	uint8_t pad, h1[RLC_MD_LEN], h2[RLC_MD_LEN];
	/* Chia - MSVC does not allow dynamic stack arrays */
	uint8_t *mask = (uint8_t *)calloc(k_len, sizeof(uint8_t));
	int result = RLC_OK;
	bn_t t;

	bn_null(t);
	/* The allocation was previously used unchecked; fail cleanly on OOM. */
	if (mask == NULL) {
		return RLC_ERR;
	}

	RLC_TRY {
		bn_new(t);
		switch (operation) {
			case RSA_ENC:
				/* DB = lHash | PS | 01 | D. */
				md_map(h1, NULL, 0);
				bn_read_bin(m, h1, RLC_MD_LEN);
				*p_len = k_len - 2 * RLC_MD_LEN - 2 - m_len;
				bn_lsh(m, m, *p_len * 8);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, 0x01);
				/* Make room for the real message. */
				bn_lsh(m, m, m_len * 8);
				break;
			case RSA_ENC_FIN:
				/* EB = 00 | maskedSeed | maskedDB. */
				rand_bytes(h1, RLC_MD_LEN);
				md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
				bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
				for (int i = 0; i < t->used; i++) {
					m->dp[i] ^= t->dp[i];
				}
				bn_write_bin(mask, k_len - RLC_MD_LEN - 1, m);
				md_mgf(h2, RLC_MD_LEN, mask, k_len - RLC_MD_LEN - 1);
				for (int i = 0; i < RLC_MD_LEN; i++) {
					h1[i] ^= h2[i];
				}
				bn_read_bin(t, h1, RLC_MD_LEN);
				bn_lsh(t, t, 8 * (k_len - RLC_MD_LEN - 1));
				bn_add(t, t, m);
				bn_copy(m, t);
				break;
			case RSA_DEC:
				/* Leading byte of the block must be zero. */
				m_len = k_len - 1;
				bn_rsh(t, m, 8 * m_len);
				if (!bn_is_zero(t)) {
					result = RLC_ERR;
				}
				/* Unmask the seed, then unmask DB with MGF(seed). */
				m_len -= RLC_MD_LEN;
				bn_rsh(t, m, 8 * m_len);
				bn_write_bin(h1, RLC_MD_LEN, t);
				bn_mod_2b(m, m, 8 * m_len);
				bn_write_bin(mask, m_len, m);
				md_mgf(h2, RLC_MD_LEN, mask, m_len);
				for (int i = 0; i < RLC_MD_LEN; i++) {
					h1[i] ^= h2[i];
				}
				md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
				bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
				for (int i = 0; i < t->used; i++) {
					m->dp[i] ^= t->dp[i];
				}
				/* Check lHash, accumulating differences without early exit. */
				m_len -= RLC_MD_LEN;
				bn_rsh(t, m, 8 * m_len);
				bn_write_bin(h2, RLC_MD_LEN, t);
				md_map(h1, NULL, 0);
				pad = 0;
				for (int i = 0; i < RLC_MD_LEN; i++) {
					pad |= h1[i] - h2[i];
				}
				if (result == RLC_OK) {
					result = (pad ? RLC_ERR : RLC_OK);
				}
				/* Strip the PS | 01 delimiter before the message. */
				bn_mod_2b(m, m, 8 * m_len);
				*p_len = bn_size_bin(m);
				(*p_len)--;
				bn_rsh(t, m, *p_len * 8);
				if (bn_cmp_dig(t, 1) != RLC_EQ) {
					result = RLC_ERR;
				}
				bn_mod_2b(m, m, *p_len * 8);
				*p_len = k_len - *p_len;
				break;
			case RSA_SIG:
			case RSA_SIG_HASH:
				/* M' = 00 00 00 00 00 00 00 00 | H(M). */
				bn_zero(m);
				bn_lsh(m, m, 64);
				/* Make room for the real message. */
				bn_lsh(m, m, RLC_MD_LEN * 8);
				break;
			case RSA_SIG_FIN:
				memset(mask, 0, 8);
				bn_write_bin(mask + 8, RLC_MD_LEN, m);
				md_map(h1, mask, RLC_MD_LEN + 8);
				bn_read_bin(m, h1, RLC_MD_LEN);
				md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
				bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
				t->dp[0] ^= 0x01;
				/* m_len is now the size in bits of the modulus. */
				bn_lsh(t, t, 8 * RLC_MD_LEN);
				bn_add(m, t, m);
				bn_lsh(m, m, 8);
				bn_add_dig(m, m, RSA_PSS);
				for (int i = m_len - 1; i < 8 * k_len; i++) {
					bn_set_bit(m, i, 0);
				}
				break;
			case RSA_VER:
			case RSA_VER_HASH:
				/* The trailer byte must be 0xBC. */
				bn_mod_2b(t, m, 8);
				if (bn_cmp_dig(t, RSA_PSS) != RLC_EQ) {
					result = RLC_ERR;
				} else {
					for (int i = m_len; i < 8 * k_len; i++) {
						if (bn_get_bit(m, i) != 0) {
							result = RLC_ERR;
						}
					}
					bn_rsh(m, m, 8);
					bn_mod_2b(t, m, 8 * RLC_MD_LEN);
					bn_write_bin(h2, RLC_MD_LEN, t);
					bn_rsh(m, m, 8 * RLC_MD_LEN);
					bn_write_bin(h1, RLC_MD_LEN, t);
					md_mgf(mask, k_len - RLC_MD_LEN - 1, h1, RLC_MD_LEN);
					bn_read_bin(t, mask, k_len - RLC_MD_LEN - 1);
					for (int i = 0; i < t->used; i++) {
						m->dp[i] ^= t->dp[i];
					}
					m->dp[0] ^= 0x01;
					for (int i = m_len - 1; i < 8 * k_len; i++) {
						bn_set_bit(m, i - ((RLC_MD_LEN + 1) * 8), 0);
					}
					if (!bn_is_zero(m)) {
						result = RLC_ERR;
					}
					/* Hand H back to the caller for recomputation. */
					bn_read_bin(m, h2, RLC_MD_LEN);
					*p_len = k_len - RLC_MD_LEN;
				}
				break;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(t);
	}
	free(mask);
	return result;
}
/*============================================================================*/
/* Public definitions */
/*============================================================================*/
/**
 * Generates an RSA key pair with modulus of the given bit length and public
 * exponent e = 2^16 + 1.  With CP_CRT defined, the CRT parameters
 * (dP, dQ, qInv) are also computed for the private key.
 */
int cp_rsa_gen(rsa_t pub, rsa_t prv, int bits) {
bn_t t, r;
int result = RLC_OK;
if (pub == NULL || prv == NULL || bits == 0) {
return RLC_ERR;
}
bn_null(t);
bn_null(r);
RLC_TRY {
bn_new(t);
bn_new(r);
/* Generate different primes p and q. */
do {
bn_gen_prime(prv->crt->p, bits / 2);
bn_gen_prime(prv->crt->q, bits / 2);
} while (bn_cmp(prv->crt->p, prv->crt->q) == RLC_EQ);
/* Swap p and q so that p is smaller. */
if (bn_cmp(prv->crt->p, prv->crt->q) != RLC_LT) {
bn_copy(t, prv->crt->p);
bn_copy(prv->crt->p, prv->crt->q);
bn_copy(prv->crt->q, t);
}
/* n = pq. */
bn_mul(pub->crt->n, prv->crt->p, prv->crt->q);
bn_copy(prv->crt->n, pub->crt->n);
/* p and q are decremented in place to compute phi(n). */
bn_sub_dig(prv->crt->p, prv->crt->p, 1);
bn_sub_dig(prv->crt->q, prv->crt->q, 1);
/* phi(n) = (p - 1)(q - 1). */
bn_mul(t, prv->crt->p, prv->crt->q);
/* e = 2^16 + 1 = 65537. */
bn_set_2b(pub->e, 16);
bn_add_dig(pub->e, pub->e, 1);
#if !defined(CP_CRT)
/* d = e^(-1) mod phi(n). */
bn_gcd_ext(r, prv->d, NULL, pub->e, t);
if (bn_sign(prv->d) == RLC_NEG) {
bn_add(prv->d, prv->d, t);
}
/* NOTE(review): if gcd(e, phi) != 1 this falls through with result still
 * RLC_OK and p, q left decremented -- confirm whether that case should be
 * reported as RLC_ERR. */
if (bn_cmp_dig(r, 1) == RLC_EQ) {
/* Restore p and q. */
bn_add_dig(prv->crt->p, prv->crt->p, 1);
bn_add_dig(prv->crt->q, prv->crt->q, 1);
result = RLC_OK;
}
#else
/* d = e^(-1) mod phi(n). */
bn_gcd_ext(r, prv->d, NULL, pub->e, t);
if (bn_sign(prv->d) == RLC_NEG) {
bn_add(prv->d, prv->d, t);
}
if (bn_cmp_dig(r, 1) == RLC_EQ) {
/* dP = d mod (p - 1). */
bn_mod(prv->crt->dp, prv->d, prv->crt->p);
/* dQ = d mod (q - 1). */
bn_mod(prv->crt->dq, prv->d, prv->crt->q);
/* Restore p and q. */
bn_add_dig(prv->crt->p, prv->crt->p, 1);
bn_add_dig(prv->crt->q, prv->crt->q, 1);
/* qInv = q^(-1) mod p. */
bn_mod_inv(prv->crt->qi, prv->crt->q, prv->crt->p);
result = RLC_OK;
}
#endif /* CP_CRT */
}
RLC_CATCH_ANY {
result = RLC_ERR;
}
RLC_FINALLY {
bn_free(t);
bn_free(r);
}
return result;
}
/**
 * Encrypts the in_len-byte message in[] under the public key pub, writing the
 * padded ciphertext (exactly the modulus size) into out[].  *out_len must
 * hold the capacity of out[] on entry and receives the ciphertext length.
 * Returns RLC_OK on success, RLC_ERR otherwise.
 */
int cp_rsa_enc(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t pub) {
	bn_t m, eb;
	int size, pad_len, result = RLC_OK;

	/* Validate pub before touching it: the modulus size was previously
	 * read from pub->crt->n before the NULL check. */
	if (pub == NULL || in_len <= 0) {
		return RLC_ERR;
	}

	bn_null(m);
	bn_null(eb);

	size = bn_size_bin(pub->crt->n);
	if (in_len > (size - RSA_PAD_LEN)) {
		return RLC_ERR;
	}

	RLC_TRY {
		bn_new(m);
		bn_new(eb);
		bn_zero(m);
		bn_zero(eb);

#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC) == RLC_OK) {
#endif
			/* Insert the message into the padded block. */
			bn_read_bin(m, in, in_len);
			bn_add(eb, eb, m);
#if CP_RSAPD == PKCS2
			/* OAEP masking step. */
			pad_pkcs2(eb, &pad_len, in_len, size, RSA_ENC_FIN);
#endif
			/* c = eb^e mod n. */
			bn_mxp(eb, eb, pub->e, pub->crt->n);
			if (size <= *out_len) {
				*out_len = size;
				memset(out, 0, *out_len);
				bn_write_bin(out, size, eb);
			} else {
				result = RLC_ERR;
			}
		} else {
			result = RLC_ERR;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
	}
	return result;
}
/**
 * Decrypts the in_len-byte ciphertext in[] under the private key prv, writing
 * the recovered message into out[] and its length into *out_len.  Uses the
 * CRT exponentiation (optionally parallelized with OpenMP) when CP_CRT is
 * defined.  Returns RLC_OK on success, RLC_ERR otherwise.
 */
int cp_rsa_dec(uint8_t *out, int *out_len, uint8_t *in, int in_len, rsa_t prv) {
	bn_t m, eb;
	int size, pad_len, result = RLC_OK;

	/* Validate prv before touching it: the modulus size was previously
	 * read from prv->crt->n before the NULL check. */
	if (prv == NULL) {
		return RLC_ERR;
	}

	bn_null(m);
	bn_null(eb);

	size = bn_size_bin(prv->crt->n);
	if (in_len != size || in_len < RSA_PAD_LEN) {
		return RLC_ERR;
	}

	RLC_TRY {
		bn_new(m);
		bn_new(eb);
		bn_read_bin(eb, in, in_len);
#if !defined(CP_CRT)
		/* m = c^d mod n. */
		bn_mxp(eb, eb, prv->d, prv->crt->n);
#else
		bn_copy(m, eb);
#if MULTI == OPENMP
		omp_set_num_threads(CORES);
#pragma omp parallel copyin(core_ctx) firstprivate(prv)
		{
#pragma omp sections
			{
#pragma omp section
				{
#endif
					/* m1 = c^dP mod p. */
					bn_mxp(eb, eb, prv->crt->dp, prv->crt->p);
#if MULTI == OPENMP
				}
#pragma omp section
				{
#endif
					/* m2 = c^dQ mod q. */
					bn_mxp(m, m, prv->crt->dq, prv->crt->q);
#if MULTI == OPENMP
				}
			}
		}
#endif
		/* m1 = m1 - m2 mod p. */
		bn_sub(eb, eb, m);
		while (bn_sign(eb) == RLC_NEG) {
			bn_add(eb, eb, prv->crt->p);
		}
		bn_mod(eb, eb, prv->crt->p);
		/* m1 = qInv(m1 - m2) mod p. */
		bn_mul(eb, eb, prv->crt->qi);
		bn_mod(eb, eb, prv->crt->p);
		/* m = m2 + m1 * q. */
		bn_mul(eb, eb, prv->crt->q);
		bn_add(eb, eb, m);
#endif /* CP_CRT */
		/* Sanity check: the result must be smaller than the modulus. */
		if (bn_cmp(eb, prv->crt->n) != RLC_LT) {
			result = RLC_ERR;
		}
#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, in_len, size, RSA_DEC) == RLC_OK) {
#endif
			size = size - pad_len;
			if (size <= *out_len) {
				memset(out, 0, size);
				bn_write_bin(out, size, eb);
				*out_len = size;
			} else {
				result = RLC_ERR;
			}
		} else {
			result = RLC_ERR;
		}
	}
	RLC_CATCH_ANY {
		result = RLC_ERR;
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
	}
	return result;
}
/**
 * Signs msg (or, if hash != 0, a precomputed digest) with the private key
 * prv, writing the signature into sig[] and its length into *sig_len.
 * Unlike cp_rsa_enc/cp_rsa_dec, caught errors are rethrown (ERR_CAUGHT)
 * rather than mapped to RLC_ERR.
 */
int cp_rsa_sig(uint8_t *sig, int *sig_len, uint8_t *msg, int msg_len, int hash, rsa_t prv) {
bn_t m, eb;
int pad_len, size, result = RLC_OK;
uint8_t h[RLC_MD_LEN];
if (prv == NULL || msg_len < 0) {
return RLC_ERR;
}
/* pad_len = number of payload bytes to embed: digest length unless the
 * caller supplies a precomputed hash. */
pad_len = (!hash ? RLC_MD_LEN : msg_len);
#if CP_RSAPD == PKCS2
/* PSS works on modBits - 1 bits. */
size = bn_bits(prv->crt->n) - 1;
size = (size / 8) + (size % 8 > 0);
if (pad_len > (size - 2)) {
return RLC_ERR;
}
#else
size = bn_size_bin(prv->crt->n);
if (pad_len > (size - RSA_PAD_LEN)) {
return RLC_ERR;
}
#endif
bn_null(m);
bn_null(eb);
RLC_TRY {
bn_new(m);
bn_new(eb);
bn_zero(m);
bn_zero(eb);
int operation = (!hash ? RSA_SIG : RSA_SIG_HASH);
#if CP_RSAPD == BASIC
if (pad_basic(eb, &pad_len, pad_len, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS1
if (pad_pkcs1(eb, &pad_len, pad_len, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS2
if (pad_pkcs2(eb, &pad_len, pad_len, size, operation) == RLC_OK) {
#endif
/* Insert the digest (computed here or supplied) into the block. */
if (!hash) {
md_map(h, msg, msg_len);
bn_read_bin(m, h, RLC_MD_LEN);
bn_add(eb, eb, m);
} else {
bn_read_bin(m, msg, msg_len);
bn_add(eb, eb, m);
}
#if CP_RSAPD == PKCS2
/* PSS masking step; third argument is the modulus size in bits. */
pad_pkcs2(eb, &pad_len, bn_bits(prv->crt->n), size, RSA_SIG_FIN);
#endif
bn_copy(m, eb);
#if !defined(CP_CRT)
bn_mxp(eb, eb, prv->d, prv->crt->n);
#else /* CP_CRT */
#if MULTI == OPENMP
omp_set_num_threads(CORES);
#pragma omp parallel copyin(core_ctx) firstprivate(prv)
{
#pragma omp sections
{
#pragma omp section
{
#endif
/* m1 = c^dP mod p. */
bn_mxp(eb, eb, prv->crt->dp, prv->crt->p);
#if MULTI == OPENMP
}
#pragma omp section
{
#endif
/* m2 = c^dQ mod q. */
bn_mxp(m, m, prv->crt->dq, prv->crt->q);
#if MULTI == OPENMP
}
}
}
#endif
/* m1 = m1 - m2 mod p. */
bn_sub(eb, eb, m);
while (bn_sign(eb) == RLC_NEG) {
bn_add(eb, eb, prv->crt->p);
}
bn_mod(eb, eb, prv->crt->p);
/* m1 = qInv(m1 - m2) mod p. */
bn_mul(eb, eb, prv->crt->qi);
bn_mod(eb, eb, prv->crt->p);
/* m = m2 + m1 * q. */
bn_mul(eb, eb, prv->crt->q);
bn_add(eb, eb, m);
bn_mod(eb, eb, prv->crt->n);
#endif /* CP_CRT */
size = bn_size_bin(prv->crt->n);
if (size <= *sig_len) {
memset(sig, 0, size);
bn_write_bin(sig, size, eb);
*sig_len = size;
} else {
result = RLC_ERR;
}
} else {
result = RLC_ERR;
}
}
RLC_CATCH_ANY {
RLC_THROW(ERR_CAUGHT);
}
RLC_FINALLY {
bn_free(m);
bn_free(eb);
}
return result;
}
/* Verify an RSA signature SIG (SIG_LEN bytes) over MSG with public key
 * PUB, using the padding selected by CP_RSAPD. When HASH is nonzero,
 * MSG is treated as a precomputed digest of length MSG_LEN.
 * Returns 1 when the signature verifies and 0 otherwise (including on
 * bad parameters or internal errors). */
int cp_rsa_ver(uint8_t *sig, int sig_len, uint8_t *msg, int msg_len, int hash, rsa_t pub) {
	bn_t m, eb;
	int size, pad_len, result;
	uint8_t *h1, *h2;

	/* We suppose that the signature is invalid. */
	result = 0;

	/* Validate parameters and padded-length bounds BEFORE allocating
	 * scratch buffers. The original allocated first and leaked h1/h2 on
	 * the (pub == NULL) and oversized-pad_len early returns. */
	if (pub == NULL || msg_len < 0) {
		return 0;
	}

	pad_len = (!hash ? RLC_MD_LEN : msg_len);
#if CP_RSAPD == PKCS2
	size = bn_bits(pub->crt->n) - 1;
	if (size % 8 == 0) {
		size = size / 8 - 1;
	} else {
		size = bn_size_bin(pub->crt->n);
	}
	if (pad_len > (size - 2)) {
		return 0;
	}
#else
	size = bn_size_bin(pub->crt->n);
	if (pad_len > (size - RSA_PAD_LEN)) {
		return 0;
	}
#endif

	/* The extra 8 bytes of h1 are the zero prefix used by the PKCS2
	 * (PSS-style) hashing branch below. */
	h1 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN) + 8);
	h2 = RLC_ALLOCA(uint8_t, RLC_MAX(msg_len, RLC_MD_LEN));
	if (h1 == NULL || h2 == NULL) {
		RLC_FREE(h1);
		RLC_FREE(h2);
		return 0;
	}

	bn_null(m);
	bn_null(eb);

	RLC_TRY {
		bn_new(m);
		bn_new(eb);

		/* Recover the encoded block: eb = sig^e mod n. */
		bn_read_bin(eb, sig, sig_len);
		bn_mxp(eb, eb, pub->e, pub->crt->n);

		int operation = (!hash ? RSA_VER : RSA_VER_HASH);
#if CP_RSAPD == BASIC
		if (pad_basic(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS1
		if (pad_pkcs1(eb, &pad_len, RLC_MD_LEN, size, operation) == RLC_OK) {
#elif CP_RSAPD == PKCS2
		if (pad_pkcs2(eb, &pad_len, bn_bits(pub->crt->n), size, operation) == RLC_OK) {
#endif
#if CP_RSAPD == PKCS2
			memset(h1, 0, 8);
			if (!hash) {
				md_map(h1 + 8, msg, msg_len);
				md_map(h2, h1, RLC_MD_LEN + 8);
				memset(h1, 0, RLC_MD_LEN);
				bn_write_bin(h1, size - pad_len, eb);
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, h2, RLC_MD_LEN);
			} else {
				memcpy(h1 + 8, msg, msg_len);
				md_map(h2, h1, RLC_MD_LEN + 8);
				memset(h1, 0, msg_len);
				bn_write_bin(h1, size - pad_len, eb);
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, h2, msg_len);
			}
#else
			memset(h1, 0, RLC_MAX(msg_len, RLC_MD_LEN));
			bn_write_bin(h1, size - pad_len, eb);
			if (!hash) {
				md_map(h2, msg, msg_len);
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, h2, RLC_MD_LEN);
			} else {
				/* Everything went ok, so signature status is changed. */
				result = util_cmp_const(h1, msg, msg_len);
			}
#endif
			/* Constant-time comparison yields RLC_EQ on a match. */
			result = (result == RLC_EQ ? 1 : 0);
		} else {
			result = 0;
		}
	}
	RLC_CATCH_ANY {
		result = 0;
	}
	RLC_FINALLY {
		bn_free(m);
		bn_free(eb);
		RLC_FREE(h1);
		RLC_FREE(h2);
	}
	return result;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values.
 * Y is used as scratch: it may be modified while normalizing the carry.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec lands in [0, 1000000),
   * moving whole seconds between the two fields as needed. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* The microsecond difference is now certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: order-1 3D 7-point stencil with variable coefficients.
 * Usage: prog [Nx Ny Nz [Nt]]; sensible defaults are used when arguments
 * are omitted. Fixes vs. original: Nx/Ny/Nz/Nt were read uninitialized
 * when arguments were missing; the stencil read uninitialized boundary
 * planes of A; `min` was an undeclared function (the macro is MIN); the
 * top-level A/coef/tile_size allocations were never freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults keep the run well-defined without command-line arguments. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays (two time planes, 7 coefficient volumes)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 128;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables — start at index 0, since the stencil reads the
  // boundary planes (previously left uninitialized: undefined behavior)
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0; /* only the interior is ever written */
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  // NOTE(review): omp_get_max_threads needs <omp.h>; confirm that
  // print_utils.h pulls it in when _OPENMP is defined.
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Bug fix: the macro defined above is MIN; `min` was undeclared. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level pointers, which leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
blake2bp.c | /*
BLAKE2 reference source code package - optimized C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/*
blake2b_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialize a leaf state from P, then override the expected output
   length: leaf instances emit inner_length bytes, not digest_length. */
static int blake2bp_init_leaf_param( blake2b_state *S, const blake2b_param *P )
{
  const int rc = blake2b_init_param( S, P );
  S->outlen = P->inner_length;
  return rc;
}
/* Build the parameter block for a depth-2 tree leaf at node OFFSET and
   initialize S from it. Zero the whole block first, then fill in only
   the nonzero fields (identical result to setting every field). */
static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  memset( P, 0, sizeof( P ) );
  P->digest_length = (uint8_t)outlen;
  P->key_length    = (uint8_t)keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->node_offset   = offset;
  P->inner_length  = BLAKE2B_OUTBYTES;
  return blake2bp_init_leaf_param( S, P );
}
/* Build the parameter block for the root node (node_depth 1, offset 0)
   of the depth-2 tree and initialize S from it. */
static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen )
{
  blake2b_param P[1];
  memset( P, 0, sizeof( P ) );
  P->digest_length = (uint8_t)outlen;
  P->key_length    = (uint8_t)keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->node_depth    = 1;
  P->inner_length  = BLAKE2B_OUTBYTES;
  return blake2b_init_param( S, P );
}
/* Initialize an unkeyed BLAKE2bp state: one root plus PARALLELISM_DEGREE
   leaves; the root and the final leaf are flagged as last nodes.
   Returns 0 on success, -1 on bad output length or init failure. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  size_t worker;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( worker = 0; worker < PARALLELISM_DEGREE; ++worker )
  {
    if( blake2bp_init_leaf( S->S[worker], outlen, 0, worker ) < 0 ) return -1;
  }

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Keyed initialization: set up root and leaves, then have every leaf
 * absorb one zero-padded block containing the key (the BLAKE2 keying
 * convention). Returns 0 on success, -1 on bad parameters.
 * Fix: removed a leftover debug printf from the key-absorb loop. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    /* Every leaf absorbs the same padded key block first. */
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}
/* Absorb INLEN bytes at PIN. Full PARALLELISM_DEGREE-block stripes are
 * distributed round-robin across the leaf states (in parallel when
 * OpenMP is enabled); any tail shorter than one stripe is buffered in
 * S->buf for the next update/final call. Always returns 0.
 * Fix: removed leftover debug printfs from the hashing loops. */
int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  size_t i;

  /* If buffered data plus new input completes a full stripe, top the
   * buffer up and feed one block to each leaf. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

  /* Leaf i consumes blocks i, i+P, i+2P, ... of the input stripes. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;

    in__ += i * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Stash the remainder (less than one full stripe) in the buffer. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}
/* Finalize: flush buffered blocks into their leaves, finalize every
 * leaf, then hash the concatenated leaf digests with the root.
 * Returns 0 on success, -1 on bad output buffer.
 * Fix: the final loop was unbraced with a debug printf as its body, so
 * blake2b_update(S->R, hash[i], ...) executed exactly once AFTER the
 * loop with i == PARALLELISM_DEGREE — an out-of-bounds read of hash[]
 * and a wrong digest. The loop now hashes every leaf digest. */
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  size_t i;

  if(out == NULL || outlen < S->outlen) {
    return -1;
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;
      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );
  }

  return blake2b_final( S->R, out, S->outlen );
}
/* One-shot BLAKE2bp: hash INLEN bytes at IN (optionally keyed) into OUT.
 * Four leaf states consume the input in round-robin block stripes, then
 * the root hashes the concatenated leaf digests.
 * Returns 0 on success, -1 on invalid parameters.
 * Fix: removed leftover debug printfs from the hashing loops. */
int blake2bp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];
  size_t i;

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if( NULL == key && keylen > 0 ) return -1;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    /* Every leaf absorbs the same padded key block first. */
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;

    in__ += i * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* Leaf i also gets its block of the final partial stripe, if any. */
    if( inlen__ > i * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[i], in__, len );
    }

    blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; /* Mark as last node */

  /* The root node hashes the concatenated leaf digests. */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test: check the keyed one-shot and streaming APIs against the
   known-answer test vectors in blake2-kat.h. Prints "ok" and returns 0
   on success; prints "error" and returns -1 on any mismatch. */
int main( void )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;

  /* Deterministic fixtures: byte n holds the value n (mod 256). */
  for( i = 0; i < BLAKE2B_KEYBYTES; ++i ) key[i] = ( uint8_t )i;
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i;

  /* One-shot API: every message prefix must match the keyed KAT. */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );
    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
      goto fail;
  }

  /* Streaming API: feed the same messages in chunks of every size below
     one block and expect identical digests. */
  for( step = 1; step < BLAKE2B_BLOCKBYTES; ++step )
  {
    for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    {
      uint8_t hash[BLAKE2B_OUTBYTES];
      blake2bp_state S;
      const uint8_t * cursor = buf;
      size_t remaining = i;

      if( blake2bp_init_key( &S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES ) < 0 )
        goto fail;

      while( remaining >= step )
      {
        if( blake2bp_update( &S, cursor, step ) < 0 ) goto fail;
        remaining -= step;
        cursor += step;
      }
      if( blake2bp_update( &S, cursor, remaining ) < 0 ) goto fail;
      if( blake2bp_final( &S, hash, BLAKE2B_OUTBYTES ) < 0 ) goto fail;

      if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
        goto fail;
    }
  }

  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
|
convolution_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_transform_kernel_pack1to4_neon(const Mat& weight_data, Mat& weight_data_pack1to4, int num_input, int num_output, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// src = kw-kh-inch-outch
// dst = 4b-kw-kh-inch-outch/4b
Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);
weight_data_pack1to4.create(maxk, num_input, num_output / 4, (size_t)4 * 4, 4);
for (int q = 0; q + 3 < num_output; q += 4)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
Mat g0 = weight_data_pack1to4.channel(q / 4);
for (int p = 0; p < num_input; p++)
{
const float* k00 = k0.row(p);
const float* k10 = k1.row(p);
const float* k20 = k2.row(p);
const float* k30 = k3.row(p);
float* g00 = g0.row(p);
for (int k = 0; k < maxk; k++)
{
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00 += 4;
}
}
}
}
// Convolution from 1-element-packed input to 4-element-packed output
// using NEON: for each output pixel, every kernel tap of every input
// channel is multiply-accumulated into one float32x4 accumulator (four
// output channels at once), then activated and stored.
static void convolution_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1to4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: element offset of each tap from the window's
    // top-left sample, accounting for dilation and row stride
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // accumulator starts from the bias (or zero) for the
                // four packed output channels
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const float* kptr = (const float*)weight_data_pack1to4 + maxk * channels * p * 4;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        // broadcast one input sample across lanes and
                        // accumulate against four packed weights
                        float32x4_t _val = vdupq_n_f32(sptr[space_ofs[k]]);
                        float32x4_t _w = vld1q_f32(kptr);
                        _sum = vmlaq_f32(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1q_f32(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "dynmat.h"
#include <math.h>
#include <stdlib.h>
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix, const long num_patom,
const long num_satom, const double *fc,
const double q[3], const double (*svecs)[3],
const long (*multi)[2], const double *mass,
const long *s2p_map, const long *p2s_map,
const double (*charge_sum)[3][3], const long i,
const long j);
static void get_dm(double dm_real[3][3], double dm_imag[3][3],
const long num_patom, const long num_satom, const double *fc,
const double q[3], const double (*svecs)[3],
const long (*multi)[2], const long *p2s_map,
const double (*charge_sum)[3][3], const long i, const long j,
const long k);
static double get_dielectric_part(const double q_cart[3],
const double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
const double (*G_list)[3], /* [num_G, 3] */
const long num_G, const long num_patom,
const double q_cart[3], const double *q_direction_cart,
const double dielectric[3][3],
const double (*pos)[3], /* [num_patom, 3] */
const double lambda, const double tolerance);
static void make_Hermitian(double *mat, const long num_band);
static void multiply_borns(double *dd, const double *dd_in,
const long num_patom, const double (*born)[3][3]);
/* Build the full dynamical matrix D(q) (interleaved real/imag storage)
 * by filling every (row, col) 3x3 atom-pair tile, then symmetrizing so
 * the result is exactly Hermitian. The flattened index loop is what the
 * OpenMP branch distributes across threads. Always returns 0. */
long dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
                                   const long num_patom, const long num_satom,
                                   const double *fc, const double q[3],
                                   const double (*svecs)[3],
                                   const long (*multi)[2], const double *mass,
                                   const long *s2p_map, const long *p2s_map,
                                   const double (*charge_sum)[3][3],
                                   const long with_openmp) {
    long idx, row, col;

    if (with_openmp) {
#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
        for (idx = 0; idx < num_patom * num_patom; idx++) {
            get_dynmat_ij(dynamical_matrix, num_patom, num_satom, fc, q, svecs,
                          multi, mass, s2p_map, p2s_map, charge_sum,
                          idx / num_patom, /* i */
                          idx % num_patom); /* j */
        }
    } else {
        for (row = 0; row < num_patom; row++) {
            for (col = 0; col < num_patom; col++) {
                get_dynmat_ij(dynamical_matrix, num_patom, num_satom, fc, q,
                              svecs, multi, mass, s2p_map, p2s_map, charge_sum,
                              row, col);
            }
        }
    }

    /* Enforce exact Hermiticity numerically. */
    make_Hermitian(dynamical_matrix, num_patom * 3);

    return 0;
}
/* Reciprocal-space dipole-dipole (Ewald) contribution at q.
 * dd receives [natom, 3, natom, 3, (real,imag)]; dd_q0 is the
 * precomputed q->0 on-site term subtracted from the diagonal blocks;
 * factor is the overall 4pi/V unit conversion.
 * Fix: the scratch malloc result was dereferenced without a NULL check
 * (CERT MEM32-C); on failure we now return leaving dd untouched. */
void dym_get_recip_dipole_dipole(
    double *dd, /* [natom, 3, natom, 3, (real,imag)] */
    const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
    const double (*G_list)[3], /* [num_G, 3] */
    const long num_G, const long num_patom, const double q_cart[3],
    const double *q_direction_cart, /* must be pointer */
    const double (*born)[3][3], const double dielectric[3][3],
    const double (*pos)[3], /* [num_patom, 3] */
    const double factor, /* 4pi/V*unit-conv */
    const double lambda, const double tolerance) {
    long i, k, l, adrs, adrs_sum;
    double *dd_tmp;

    /* Scratch for the Born-charge-free Ewald sum. */
    dd_tmp = (double *)malloc(sizeof(double) * num_patom * num_patom * 18);
    if (dd_tmp == NULL) {
        return; /* allocation failed: leave dd unmodified */
    }

    for (i = 0; i < num_patom * num_patom * 18; i++) {
        dd[i] = 0;
        dd_tmp[i] = 0;
    }

    /* Reciprocal-lattice sum of the dipole-dipole kernel at q. */
    get_KK(dd_tmp, G_list, num_G, num_patom, q_cart, q_direction_cart,
           dielectric, pos, lambda, tolerance);

    /* Contract with the Born effective charges on both sides. */
    multiply_borns(dd, dd_tmp, num_patom, born);

    /* Subtract the q->0 self term from each diagonal (i == j) block. */
    for (i = 0; i < num_patom; i++) {
        for (k = 0; k < 3; k++) {     /* alpha */
            for (l = 0; l < 3; l++) { /* beta */
                adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
                adrs_sum = i * 9 + k * 3 + l;
                dd[adrs * 2] -= dd_q0[adrs_sum * 2];
                dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
            }
        }
    }

    for (i = 0; i < num_patom * num_patom * 18; i++) {
        dd[i] *= factor;
    }

    /* This may not be necessary. */
    /* make_Hermitian(dd, num_patom * 3); */

    free(dd_tmp);
    dd_tmp = NULL;
}
/* Precompute the q->0 on-site correction dd_q0 [natom, 3, 3, (real,imag)]
 * used by dym_get_recip_dipole_dipole: the Ewald kernel is evaluated at
 * q = 0, contracted with the Born charges, summed over the second atomic
 * index, and finally Hermitian-symmetrized in the Cartesian indices.
 * NOTE(review): both mallocs are dereferenced unchecked — confirm the
 * callers guarantee small enough num_patom or add checks upstream. */
void dym_get_recip_dipole_dipole_q0(
    double *dd_q0, /* [natom, 3, 3, (real,imag)] */
    const double (*G_list)[3], /* [num_G, 3] */
    const long num_G, const long num_patom, const double (*born)[3][3],
    const double dielectric[3][3], const double (*pos)[3], /* [num_patom, 3] */
    const double lambda, const double tolerance) {
    long i, j, k, l, adrs_tmp, adrs, adrsT;
    double zero_vec[3];
    double *dd_tmp1, *dd_tmp2;

    dd_tmp1 = NULL;
    dd_tmp1 = (double *)malloc(sizeof(double) * num_patom * num_patom * 18);
    dd_tmp2 = NULL;
    dd_tmp2 = (double *)malloc(sizeof(double) * num_patom * num_patom * 18);

    for (i = 0; i < num_patom * num_patom * 18; i++) {
        dd_tmp1[i] = 0;
        dd_tmp2[i] = 0;
    }

    zero_vec[0] = 0;
    zero_vec[1] = 0;
    zero_vec[2] = 0;

    /* Ewald kernel at q = 0 (no q_direction), then Born-charge contraction. */
    get_KK(dd_tmp1, G_list, num_G, num_patom, zero_vec, NULL, dielectric, pos,
           lambda, tolerance);
    multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);

    for (i = 0; i < num_patom * 18; i++) {
        dd_q0[i] = 0;
    }

    /* Sum over the second atomic index j for each atom i and each
     * Cartesian pair (alpha, beta). */
    for (i = 0; i < num_patom; i++) {
        for (k = 0; k < 3; k++) {     /* alpha */
            for (l = 0; l < 3; l++) { /* beta */
                adrs = i * 9 + k * 3 + l;
                for (j = 0; j < num_patom; j++) {
                    adrs_tmp =
                        i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
                    dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
                    dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
                }
            }
        }
    }

    /* Summation over another atomic index */
    /* for (j = 0; j < num_patom; j++) { */
    /*   for (k = 0; k < 3; k++) { /\* alpha *\/ */
    /*     for (l = 0; l < 3; l++) { /\* beta *\/ */
    /*       adrs = j * 9 + k * 3 + l; */
    /*       for (i = 0; i < num_patom; i++) { */
    /*         adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */
    /*         dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */
    /*         dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */
    /*       } */
    /*     } */
    /*   } */
    /* } */

    /* Make each atom's 3x3 block Hermitian: average real parts across
     * the (alpha, beta)/(beta, alpha) transpose, anti-average imaginary. */
    for (i = 0; i < num_patom; i++) {
        for (k = 0; k < 3; k++) {     /* alpha */
            for (l = 0; l < 3; l++) { /* beta */
                adrs = i * 9 + k * 3 + l;
                adrsT = i * 9 + l * 3 + k;
                dd_q0[adrs * 2] += dd_q0[adrsT * 2];
                dd_q0[adrs * 2] /= 2;
                dd_q0[adrsT * 2] = dd_q0[adrs * 2];
                dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
                dd_q0[adrs * 2 + 1] /= 2;
                dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
            }
        }
    }

    free(dd_tmp1);
    dd_tmp1 = NULL;
    free(dd_tmp2);
    dd_tmp2 = NULL;
}
/* Fill charge_sum[i * num_patom + j][a][b] with
 * (q . Z_i)_a * (q . Z_j)_b * factor, where Z_i is atom i's Born
 * effective charge tensor and q is given in Cartesian coordinates. */
void dym_get_charge_sum(
    double (*charge_sum)[3][3], const long num_patom,
    const double factor, /* 4pi/V*unit-conv and denominator */
    const double q_cart[3], const double (*born)[3][3]) {
    long i, j, k, a, b;
    double(*q_born)[3];

    q_born = (double(*)[3])malloc(sizeof(double[3]) * num_patom);

    /* q_born[i][j] = sum_k q_cart[k] * born[i][k][j]. */
    for (i = 0; i < num_patom; i++) {
        for (j = 0; j < 3; j++) {
            q_born[i][j] = 0;
            for (k = 0; k < 3; k++) {
                q_born[i][j] += q_cart[k] * born[i][k][j];
            }
        }
    }

    /* Outer product of the contracted charges for every atom pair. */
    for (i = 0; i < num_patom; i++) {
        for (j = 0; j < num_patom; j++) {
            for (a = 0; a < 3; a++) {
                for (b = 0; b < 3; b++) {
                    charge_sum[i * num_patom + j][a][b] =
                        q_born[i][a] * q_born[j][b] * factor;
                }
            }
        }
    }

    free(q_born);
    q_born = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom / num_patom, 3] */
/* shortest_vectors[:, 3] */
/* multiplicities[num_satom, num_patom, 2] */
/* Inverse transform: rebuild real-space force constants fc
 * [num_patom, num_satom, 3, 3] from dynamical matrices dm sampled on
 * the N = num_satom / num_patom commensurate q-points, undoing the
 * mass weighting and averaging the phase factor over the equivalent
 * shortest vectors of each atom pair. */
void dym_transform_dynmat_to_fc(double *fc, const double *dm,
                                const double (*comm_points)[3],
                                const double (*svecs)[3],
                                const long (*multi)[2], const double *masses,
                                const long *s2pp_map, const long *fc_index_map,
                                const long num_patom, const long num_satom) {
    long i, j, k, l, m, N, adrs, m_pair, i_pair, svecs_adrs;
    double coef, phase, cos_phase, sin_phase;

    N = num_satom / num_patom;

    for (i = 0; i < num_patom * num_satom * 9; i++) {
        fc[i] = 0;
    }

    for (i = 0; i < num_patom; i++) {
        for (j = 0; j < num_satom; j++) {
            /* multi[i_pair] = (count of equivalent shortest vectors,
             * starting index into svecs) for the (j, i) pair. */
            i_pair = j * num_patom + i;
            m_pair = multi[i_pair][0];
            svecs_adrs = multi[i_pair][1];
            /* Undo the 1/sqrt(m_i m_j) mass weighting; 1/N averages
             * over the commensurate points. */
            coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
            for (k = 0; k < N; k++) {
                /* Phase factor exp(-2*pi*i q_k . r), averaged over the
                 * m_pair equivalent shortest vectors. */
                cos_phase = 0;
                sin_phase = 0;
                for (l = 0; l < m_pair; l++) {
                    phase = 0;
                    for (m = 0; m < 3; m++) {
                        phase -= comm_points[k][m] * svecs[svecs_adrs + l][m];
                    }
                    cos_phase += cos(phase * 2 * PI);
                    sin_phase += sin(phase * 2 * PI);
                }
                cos_phase /= m_pair;
                sin_phase /= m_pair;
                for (l = 0; l < 3; l++) {
                    for (m = 0; m < 3; m++) {
                        /* dm is stored interleaved (real, imag) per
                         * element; take the real part of dm * phase. */
                        adrs = k * num_patom * num_patom * 18 +
                               i * num_patom * 18 + l * num_patom * 6 +
                               s2pp_map[j] * 6 + m * 2;
                        fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 +
                           m] +=
                            (dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) *
                            coef;
                    }
                }
            }
        }
    }
}
/* Compute the 3x3 tile of the dynamical matrix for primitive-atom pair
 * (i, j): accumulate phase-weighted force constants from every supercell
 * atom mapping onto j, mass-weight by 1/sqrt(m_i m_j), and store into
 * the full matrix (interleaved real/imaginary layout). */
static void get_dynmat_ij(double *dynamical_matrix, const long num_patom,
                          const long num_satom, const double *fc,
                          const double q[3], const double (*svecs)[3],
                          const long (*multi)[2], const double *mass,
                          const long *s2p_map, const long *p2s_map,
                          const double (*charge_sum)[3][3], const long i,
                          const long j) {
    long s, a, b, adrs;
    double mass_sqrt;
    double dm_real[3][3] = {{0}};
    double dm_imag[3][3] = {{0}};

    mass_sqrt = sqrt(mass[i] * mass[j]);

    /* Only supercell atoms whose primitive image is atom j contribute. */
    for (s = 0; s < num_satom; s++) {
        if (s2p_map[s] == p2s_map[j]) {
            get_dm(dm_real, dm_imag, num_patom, num_satom, fc, q, svecs, multi,
                   p2s_map, charge_sum, i, j, s);
        }
    }

    for (a = 0; a < 3; a++) {
        for (b = 0; b < 3; b++) {
            adrs = (i * 3 + a) * num_patom * 3 + j * 3 + b;
            dynamical_matrix[adrs * 2] = dm_real[a][b] / mass_sqrt;
            dynamical_matrix[adrs * 2 + 1] = dm_imag[a][b] / mass_sqrt;
        }
    }
}
/* Accumulate into dm_real/dm_imag the 3x3 force-constant contribution
 * of supercell atom k paired with primitive atom i, weighted by the
 * phase factor exp(2*pi*i q.r) averaged over the m_pair equivalent
 * shortest vectors. When charge_sum is non-NULL its (i, j) block is
 * added to each force-constant element (non-analytical correction). */
static void get_dm(double dm_real[3][3], double dm_imag[3][3],
                   const long num_patom, const long num_satom, const double *fc,
                   const double q[3], const double (*svecs)[3],
                   const long (*multi)[2], const long *p2s_map,
                   const double (*charge_sum)[3][3], const long i, const long j,
                   const long k) {
    long l, m, i_pair, m_pair, adrs;
    double phase, cos_phase, sin_phase, fc_elem;

    cos_phase = 0;
    sin_phase = 0;
    /* multi[i_pair] = (number of equivalent vectors, index into svecs). */
    i_pair = k * num_patom + i;
    m_pair = multi[i_pair][0];
    adrs = multi[i_pair][1];

    /* Average the phase factor over the equivalent shortest vectors. */
    for (l = 0; l < m_pair; l++) {
        phase = 0;
        for (m = 0; m < 3; m++) {
            phase += q[m] * svecs[adrs + l][m];
        }
        cos_phase += cos(phase * 2 * PI) / m_pair;
        sin_phase += sin(phase * 2 * PI) / m_pair;
    }

    for (l = 0; l < 3; l++) {
        for (m = 0; m < 3; m++) {
            if (charge_sum) {
                fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
                           charge_sum[i * num_patom + j][l][m]);
            } else {
                fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
            }
            dm_real[l][m] += fc_elem * cos_phase;
            dm_imag[l][m] += fc_elem * sin_phase;
        }
    }
}
/* Quadratic form q^T . epsilon . q of the wave vector with the
 * dielectric tensor. */
static double get_dielectric_part(const double q_cart[3],
                                  const double dielectric[3][3]) {
    long row, col;
    double component, acc;
    double x[3];

    /* x = epsilon . q */
    for (row = 0; row < 3; row++) {
        component = 0;
        for (col = 0; col < 3; col++) {
            component += dielectric[row][col] * q_cart[col];
        }
        x[row] = component;
    }

    /* q . x */
    acc = 0;
    for (row = 0; row < 3; row++) {
        acc += q_cart[row] * x[row];
    }
    return acc;
}
/* Accumulate into dd_part the reciprocal-space dipole-dipole kernel:
 * for each K = G + q (and plain G when q = 0), the outer product
 * K K^T / (K.eps.K) damped by exp(-K.eps.K / (2 lambda)^2), multiplied
 * by the structure phase factor between atom positions. At the Gamma
 * point (|K| below tolerance) the non-analytic q_direction limit is
 * used when provided, otherwise the term is skipped. */
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
                   const double (*G_list)[3], /* [num_G, 3] */
                   const long num_G, const long num_patom,
                   const double q_cart[3], const double *q_direction_cart,
                   const double dielectric[3][3],
                   const double (*pos)[3], /* [num_patom, 3] */
                   const double lambda, const double tolerance) {
    long i, j, k, l, g, adrs;
    double q_K[3];
    double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
    double KK[3][3];

    /* (2 lambda)^2 — denominator of the Ewald damping exponent. */
    L2 = 4 * lambda * lambda;

    /* sum over K = G + q and over G (i.e. q=0) */
    /* q_direction has values for summation over K at Gamma point. */
    /* q_direction is NULL for summation over G */
    for (g = 0; g < num_G; g++) {
        norm = 0;
        for (i = 0; i < 3; i++) {
            q_K[i] = G_list[g][i] + q_cart[i];
            norm += q_K[i] * q_K[i];
        }

        if (sqrt(norm) < tolerance) {
            /* Gamma point: use the directional limit, or skip. */
            if (!q_direction_cart) {
                continue;
            } else {
                dielectric_part =
                    get_dielectric_part(q_direction_cart, dielectric);
                for (i = 0; i < 3; i++) {
                    for (j = 0; j < 3; j++) {
                        KK[i][j] = q_direction_cart[i] * q_direction_cart[j] /
                                   dielectric_part;
                    }
                }
            }
        } else {
            dielectric_part = get_dielectric_part(q_K, dielectric);
            exp_damp = exp(-dielectric_part / L2);
            for (i = 0; i < 3; i++) {
                for (j = 0; j < 3; j++) {
                    KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
                }
            }
        }

        /* Distribute the 3x3 kernel over all atom pairs with the
         * appropriate structure phase factor. */
        for (i = 0; i < num_patom; i++) {
            for (j = 0; j < num_patom; j++) {
                phase = 0;
                for (k = 0; k < 3; k++) {
                    /* For D-type dynamical matrix */
                    /* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
                    /* For C-type dynamical matrix */
                    phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
                }
                phase *= 2 * PI;
                cos_phase = cos(phase);
                sin_phase = sin(phase);
                for (k = 0; k < 3; k++) {
                    for (l = 0; l < 3; l++) {
                        adrs =
                            i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
                        dd_part[adrs * 2] += KK[k][l] * cos_phase;
                        dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
                    }
                }
            }
        }
    }
}
/* Project a complex square matrix (interleaved re/im storage) onto its
   Hermitian part in place: M <- (M + M^H) / 2.  Diagonal imaginary parts
   become zero; off-diagonal pairs become complex conjugates. */
static void make_Hermitian(double *mat, const long num_band) {
    long row, col;

    for (row = 0; row < num_band; row++) {
        for (col = row; col < num_band; col++) {
            const long upper = 2 * (row * num_band + col);
            const long lower = 2 * (col * num_band + row);
            const double re = (mat[upper] + mat[lower]) / 2;
            const double im = (mat[upper + 1] - mat[lower + 1]) / 2;

            mat[upper] = re;
            mat[upper + 1] = im;
            mat[lower] = re;
            mat[lower + 1] = -im;
        }
    }
}
/* Contract the raw dipole-dipole sum dd_in with the Born effective charge
   tensors on both sides:
     dd[i,k, j,l] += sum_{m,n} Z_i[m][k] * Z_j[n][l] * dd_in[i,m, j,n]
   Both dd and dd_in are interleaved complex arrays of shape
   [num_patom, 3, num_patom, 3, (real,imag)]; dd is accumulated (+=), so
   the caller must zero it first. */
static void multiply_borns(double *dd, const double *dd_in,
                           const long num_patom, const double (*born)[3][3]) {
    long i, j, k, l, m, n, adrs, adrs_in;
    double zz;

    for (i = 0; i < num_patom; i++) {
        for (j = 0; j < num_patom; j++) {
            for (k = 0; k < 3; k++) {     /* alpha */
                for (l = 0; l < 3; l++) { /* beta */
                    adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
                    for (m = 0; m < 3; m++) {     /* alpha' */
                        for (n = 0; n < 3; n++) { /* beta' */
                            adrs_in = i * num_patom * 9 + m * num_patom * 3 +
                                      j * 3 + n;
                            /* Charge weight: note the transposed index order
                               born[i][m][k], i.e. Z^T on the left side. */
                            zz = born[i][m][k] * born[j][n][l];
                            dd[adrs * 2] += dd_in[adrs_in * 2] * zz;
                            dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz;
                        }
                    }
                }
            }
        }
    }
}
|
matmult.c | #include <stdio.h>
#include <stdlib.h>
#include "matmult_initialize.h"
#ifndef MATRIX_SIZE
#define MATRIX_SIZE 512
#endif
#define NRA MATRIX_SIZE /* number of rows in matrix A */
#define NCA MATRIX_SIZE /* number of columns in matrix A */
#define NCB MATRIX_SIZE /* number of columns in matrix B */
/* Allocate a rows x cols matrix as an array of row pointers.
   Returns NULL if any allocation fails; partially allocated rows are
   released first so the caller cannot leak memory.  Free with freeMatrix. */
double** allocateMatrix(int rows, int cols) {
    int i;
    double **matrix = malloc(sizeof(double*) * (size_t)rows);

    if (matrix == NULL) {
        return NULL;
    }
    for (i = 0; i < rows; i++) {
        matrix[i] = malloc(sizeof(double) * (size_t)cols);
        if (matrix[i] == NULL) {
            /* Unwind the rows allocated so far, then the spine. */
            while (i-- > 0) {
                free(matrix[i]);
            }
            free(matrix);
            return NULL;
        }
    }
    return matrix;
}
/* Release a matrix created by allocateMatrix: each row, then the spine.
   The cols argument is unused; it is kept for interface symmetry with
   allocateMatrix. */
void freeMatrix(double** matrix, int rows, int cols) {
    int row;

    (void)cols; /* present for symmetry only */
    for (row = rows; row-- > 0; ) {
        free(matrix[row]);
    }
    free(matrix);
}
/* Scalar product of a and b; kept as a function so instrumentation tools
   can count multiplies.  `static inline` replaces the non-standard
   `__inline` (an MSVC extension) with the portable C99 spelling, and
   `static` gives it internal linkage so no out-of-line definition is
   required elsewhere. */
static inline double multiply(double a, double b) {
    return a * b;
}
// cols_a and rows_b are the same value
// cols_a and rows_b are the same value
/* C += A x B with the classic i-j-k loop order; the rows of A are shared
   among OpenMP threads. */
void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) {
    int row, col, inner;
    #pragma omp parallel private(row,col,inner) shared(a,b,c)
    {
        /* Each thread owns a disjoint set of output rows. */
        #pragma omp for nowait
        for (row = 0; row < rows_a; row++) {
            for (col = 0; col < cols_b; col++) {
                double acc = c[row][col];
                for (inner = 0; inner < cols_a; inner++) {
                    acc += a[row][inner] * b[inner][col];
                }
                c[row][col] = acc;
            }
        }
    } /* end of parallel region */
}
/* C += A x B using the cache-friendly i-k-j loop order: the inner loop
   walks rows of B and C contiguously.  Rows of A are shared among
   OpenMP threads. */
void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) {
    int row, inner, col;
    #pragma omp parallel private(row,inner,col) shared(a,b,c)
    {
        #pragma omp for nowait
        for (row = 0; row < rows_a; row++) {
            for (inner = 0; inner < cols_a; inner++) {
                const double a_elem = a[row][inner];
                for (col = 0; col < cols_b; col++) {
                    c[row][col] += a_elem * b[inner][col];
                }
            }
        }
    } /* end of parallel region */
}
/* Allocate A (NRA x NCA), B (NCA x NCB) and C (NRA x NCB), fill them via
   initialize(), run both multiplication variants (C accumulates the result
   of each, so it holds the sum of the two products), and return one probe
   element of C.
   Bug fix: C was freed with NCA rows although it was allocated with NRA
   rows.  The two macros are currently equal, so behavior is unchanged
   today, but the call now matches the allocation. */
double do_work(void) {
    double **a,   /* matrix A to be multiplied */
           **b,   /* matrix B to be multiplied */
           **c;   /* result matrix C */
    a = allocateMatrix(NRA, NCA);
    b = allocateMatrix(NCA, NCB);
    c = allocateMatrix(NRA, NCB);
    /*** Spawn a parallel region explicitly scoping all variables ***/
    initialize(a, NRA, NCA);
    initialize(b, NCA, NCB);
    initialize(c, NRA, NCB);
    compute(a, b, c, NRA, NCA, NCB);
    compute_interchange(a, b, c, NRA, NCA, NCB);
    double result = c[0][1];
    freeMatrix(a, NRA, NCA);
    freeMatrix(b, NCA, NCB);
    freeMatrix(c, NRA, NCB);  /* was NCA: must match the NRA rows allocated */
    return result;
}
/* Entry point: run the benchmark workload once and report completion.
   Command-line arguments are accepted but unused. */
int main(int argc, char *argv[]) {
    (void)argc;
    (void)argv;
    do_work();
    printf("Done.\n");
    return 0;
}
|
parallel_queue_infinite_enqueue_dequeue.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
/*
 * Bounded producer/consumer queue demo on two OpenMP threads.
 * Thread 0 enqueues increasing integers, thread 1 dequeues them; each
 * critical section performs one operation and then fgetc(stdin) paces the
 * loop (press Enter to step).  Runs until the process is interrupted.
 *
 * Fixes versus the original:
 *  - `front` is set to 0 on the first insertion, so the first dequeue no
 *    longer reads Q[-1] (out-of-bounds).
 *  - The dequeue branches are chained with `else if`, so a successful
 *    last-element dequeue no longer also prints "NO ITEMS TO DELETE".
 *  - The thread id is a per-thread local instead of a shared variable
 *    written by both threads (data race).
 *  - The scanf() result is validated before using n as the VLA size.
 */
int main()
{
    int n, num = 0;
    printf("\n ENTER THE VALUE OF N \n");
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "invalid queue size\n");
        return 1;
    }
    int d, Q[n], rear = -1, front = -1;
    omp_set_dynamic(0);
    #pragma omp parallel num_threads(2)
    {
        int id = omp_get_thread_num();  /* private per thread: no race */
        if (id == 0)  /* producer: insert */
        {
            while (1)
            {
                #pragma omp critical
                {
                    if (rear < n - 1)
                    {
                        if (front == -1)
                            front = 0;  /* first element makes front valid */
                        Q[++rear] = num;
                        printf("\n INSERTED ITEM IS %d", num);
                        num++;
                    }
                    else
                        printf("\n NO SPACE");
                    fgetc(stdin);  /* pacing: wait for a keypress */
                }
            }
        }
        else  /* consumer: pop */
        {
            while (1)
            {
                #pragma omp critical
                {
                    if (front == rear && front != -1)
                    {
                        /* Last remaining element: drain it and recycle
                           the array by resetting both indices. */
                        d = Q[front];
                        front = -1;
                        rear = -1;
                        printf("\n DELETED ITEM IS %d", d);
                    }
                    else if (front != -1 && front < rear)
                    {
                        d = Q[front];
                        front++;
                        printf("\n DELETED ITEM IS %d", d);
                    }
                    else
                        printf("\n NO ITEMS TO DELETE");
                    fgetc(stdin);
                }
            }
        }
    }
    return 0;
}
snefru_fmt_plug.c | /* Snefru cracker patch for JtR. Hacked together during May of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_snefru_256;
extern struct fmt_main fmt_snefru_128;
#elif FMT_REGISTERS_H
john_register_one(&fmt_snefru_256);
john_register_one(&fmt_snefru_128);
#else
#include <string.h>
#include "arch.h"
#include "snefru.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 128kb 256kb
// 1 - 214k 215k
// 64 - 1435k 1411k
// 128 - 1474k 1902k *** this was chosen
// 256 - 1508k 1511k
// 512 - 1649k 1564k
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#include "memdbg.h"
// Snefru-128 and Snefru-256 are the real format labels
#define FORMAT_LABEL "Snefru"
#define FORMAT_TAG "$snefru$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE128 16
#define BINARY_SIZE256 32
#define CMP_SIZE 16
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
/* Self-test vectors: each entry pairs a ciphertext (with or without the
   "$snefru$" tag) with its known plaintext.  NULL terminates the list. */
static struct fmt_tests snefru_128_tests[] = {
    {"53b8a9b1c9ed00174d88d705fb7bae30", "mystrongpassword"},
    {"$snefru$53b8a9b1c9ed00174d88d705fb7bae30", "mystrongpassword"},
    {NULL}
};
static struct fmt_tests snefru_256_tests[] = {
    {"$snefru$4170e04e900e6221562ceb5ff6ea27fa9b9b0d9587add44a4379a02619c5a106", "mystrongpassword"},
    {"4170e04e900e6221562ceb5ff6ea27fa9b9b0d9587add44a4379a02619c5a106", "mystrongpassword"},
    {NULL}
};
/* Per-candidate buffers, sized in init(): plaintext keys and the computed
   digests (256-bit slots are shared by both the 128- and 256-bit formats). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE256 / sizeof(uint32_t)];
/* Per-run setup: scale the key counts for OpenMP and size the key/output
   buffers.  min_keys is multiplied by the thread count only; max_keys by
   thread count * OMP_SCALE (tuning factor chosen above). */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    /* init() may be called more than once; allocate only on first use. */
    if (!saved_key) {
        saved_key = mem_calloc(self->params.max_keys_per_crypt,
                               sizeof(*saved_key));
        crypt_out = mem_calloc(self->params.max_keys_per_crypt,
                               sizeof(*crypt_out));
    }
}
/* Teardown: release the buffers allocated in init() (reverse order). */
static void done(void)
{
    MEM_FREE(crypt_out);
    MEM_FREE(saved_key);
}
/* Shared validity check: accept a string of exactly `len` hex digits,
   optionally prefixed with the "$snefru$" tag.  Returns 1 when valid. */
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
    char *hash = ciphertext;
    int extra;

    if (!strncmp(hash, FORMAT_TAG, TAG_LENGTH))
        hash += TAG_LENGTH;

    return hexlenl(hash, &extra) == len && !extra;
}
/* Validate a Snefru-256 hash: 64 hex digits (optionally tagged). */
static int valid256(char *ciphertext, struct fmt_main *self)
{
    return valid(ciphertext, self, 64);
}
/* Validate a Snefru-128 hash: 32 hex digits (optionally tagged). */
static int valid128(char *ciphertext, struct fmt_main *self)
{
    return valid(ciphertext, self, 32);
}
/* Canonicalize a ciphertext: strip any existing "$snefru$" tag, then
   re-emit it with the tag prepended.
   NOTE(review): returns a pointer to a static buffer, so the result must
   be consumed before the next call — assumed safe in the caller's
   single-threaded use; confirm against the format driver. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
    static char out[TAG_LENGTH + BINARY_SIZE256 * 2 + 1];

    if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
        ciphertext += TAG_LENGTH;
    memcpy(out, FORMAT_TAG, TAG_LENGTH);
    strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE256 * 2 + 1);
    return out;
}
/* Decode a (possibly tagged) 64-hex-digit ciphertext into 32 raw bytes.
   Returns a pointer to a static, ARCH_WORD-aligned buffer that is
   overwritten on each call. */
static void *get_binary_256(char *ciphertext)
{
    static union {
        unsigned char c[32];
        ARCH_WORD dummy;  /* forces alignment of c */
    } buf;
    unsigned char *out = buf.c;
    char *p;
    int i;

    /* Skip past the last '$' of the tag when present. */
    if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
        p = strrchr(ciphertext, '$') + 1;
    else
        p = ciphertext;
    /* Convert two hex digits per output byte. */
    for (i = 0; i < 32; i++) {
        out[i] =
            (atoi16[ARCH_INDEX(*p)] << 4) |
            atoi16[ARCH_INDEX(p[1])];
        p += 2;
    }
    return out;
}
/* Decode a (possibly tagged) 32-hex-digit ciphertext into 16 raw bytes.
   Returns a pointer to a static, ARCH_WORD-aligned buffer that is
   overwritten on each call. */
static void *get_binary_128(char *ciphertext)
{
    static union {
        unsigned char c[16];
        ARCH_WORD dummy;  /* forces alignment of c */
    } buf;
    unsigned char *out = buf.c;
    char *p;
    int i;

    /* Skip past the last '$' of the tag when present. */
    if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
        p = strrchr(ciphertext, '$') + 1;
    else
        p = ciphertext;
    /* Convert two hex digits per output byte. */
    for (i = 0; i < 16; i++) {
        out[i] =
            (atoi16[ARCH_INDEX(*p)] << 4) |
            atoi16[ARCH_INDEX(p[1])];
        p += 2;
    }
    return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Hash every queued candidate key with Snefru-256 into crypt_out.
   With OpenMP the candidates are processed in parallel; without it the
   braced body runs once for index 0 (count is 1 in that build).
   Fix: removed the stray empty statement after `snefru_ctx ctx;`. */
static int crypt_256(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
    for (index = 0; index < count; index++)
#endif
    {
        snefru_ctx ctx;

        rhash_snefru256_init(&ctx);
        rhash_snefru_update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]));
        rhash_snefru_final(&ctx, (unsigned char*)crypt_out[index]);
    }
    return count;
}
/* Hash every queued candidate key with Snefru-128 into crypt_out.
   With OpenMP the candidates are processed in parallel; without it the
   braced body runs once for index 0 (count is 1 in that build).
   Fix: removed the stray empty statement after `snefru_ctx ctx;`. */
static int crypt_128(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
    for (index = 0; index < count; index++)
#endif
    {
        snefru_ctx ctx;

        rhash_snefru128_init(&ctx);
        rhash_snefru_update(&ctx, (unsigned char*)saved_key[index], strlen(saved_key[index]));
        rhash_snefru_final(&ctx, (unsigned char*)crypt_out[index]);
    }
    return count;
}
/* Return 1 if any computed hash matches `binary` in its first CMP_SIZE
   bytes, 0 otherwise.
   Fix: the scan loop was compiled only under _OPENMP, so a non-OpenMP
   build checked index 0 alone (correct only because count is then 1).
   The loop is now unconditional, which is correct for any count. */
static int cmp_all(void *binary, int count)
{
    int index;

    for (index = 0; index < count; index++)
        if (!memcmp(binary, crypt_out[index], CMP_SIZE))
            return 1;
    return 0;
}
/* Compare one candidate's hash against `binary` (first CMP_SIZE bytes). */
static int cmp_one(void *binary, int index)
{
    return !memcmp(binary, crypt_out[index], CMP_SIZE);
}
/* No further verification beyond cmp_one.
   NOTE(review): CMP_SIZE is 16 bytes, so for Snefru-256 only half of the
   digest is ever compared — presumably an accepted trade-off; confirm. */
static int cmp_exact(char *source, int index)
{
    return 1;
}
/* Store one candidate plaintext, truncated to PLAINTEXT_LENGTH and
   NUL-terminated by strnzcpy. */
static void snefru_set_key(char *key, int index)
{
    strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate plaintext for `index` (no copy is made). */
static char *get_key(int index)
{
    return saved_key[index];
}
/* John the Ripper format descriptor for Snefru-256: the first brace group
   holds the static format parameters, the second the method table wired to
   the helpers above. */
struct fmt_main fmt_snefru_256 = {
    {
        "Snefru-256",
        "",
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,
        PLAINTEXT_LENGTH,
        BINARY_SIZE256,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP,
        { NULL },
        { FORMAT_TAG },
        snefru_256_tests
    }, {
        init,
        done,
        fmt_default_reset,
        fmt_default_prepare,
        valid256,
        split,
        get_binary_256,
        fmt_default_salt,
        { NULL },
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        NULL,
        fmt_default_set_salt,
        snefru_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_256,
        {
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
/* John the Ripper format descriptor for Snefru-128; mirrors the 256-bit
   descriptor above with the 128-bit sizes, validator and crypt routine. */
struct fmt_main fmt_snefru_128 = {
    {
        "Snefru-128",
        "",
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,
        PLAINTEXT_LENGTH,
        BINARY_SIZE128,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP,
        { NULL },
        { FORMAT_TAG },
        snefru_128_tests
    }, {
        init,
        done,
        fmt_default_reset,
        fmt_default_prepare,
        valid128,
        split,
        get_binary_128,
        fmt_default_salt,
        { NULL },
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        NULL,
        fmt_default_set_salt,
        snefru_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_128,
        {
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
#endif /* plugin stanza */
|
residualbased_newton_raphson_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY)
#define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
// Strategies
#include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h"
// Utilities
#include "utilities/variable_utils.h"
#include "utilities/color_utilities.h"
#include "utilities/math_utils.h"
#include "custom_utilities/process_factory_utility.h"
#include "custom_utilities/contact_utilities.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedNewtonRaphsonContactStrategy
* @ingroup ContactStructuralMechanicsApplication
* @brief Contact Newton Raphson class
* @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedNewtonRaphsonContactStrategy :
public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ElementsContainerType ElementsArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ProcessFactoryUtility::Pointer ProcessesListType;
typedef std::size_t IndexType;
/**
 * @brief Default constructor
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 * @param ThisParameters The strategy configuration; validated against GetDefaultParameters()
 * @param pMyProcesses The list of processes to run during the solve (may be nullptr)
 * @param pPostProcesses The list of post-processes to run (may be nullptr)
 */
ResidualBasedNewtonRaphsonContactStrategy(
    ModelPart& rModelPart,
    typename TSchemeType::Pointer pScheme,
    typename TLinearSolver::Pointer pNewLinearSolver,
    typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
    IndexType MaxIterations = 30,
    bool CalculateReactions = false,
    bool ReformDofSetAtEachStep = false,
    bool MoveMeshFlag = false,
    Parameters ThisParameters = Parameters(R"({})"),
    ProcessesListType pMyProcesses = nullptr,
    ProcessesListType pPostProcesses = nullptr
    )
    : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag),
    mThisParameters(ThisParameters),
    mpMyProcesses(pMyProcesses),
    mpPostProcesses(pPostProcesses)
{
    KRATOS_TRY;

    // Cache the criteria echo level so it can be silenced and restored
    // around the inner semi-smooth loop in SolveSolutionStep().
    mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();

    Parameters default_parameters = GetDefaultParameters();
    mThisParameters.ValidateAndAssignDefaults(default_parameters);

    KRATOS_CATCH("");
}
/**
 * @brief Constructor with an explicit builder-and-solver
 * @param rModelPart The model part of the problem
 * @param pScheme The integration scheme
 * @param pNewLinearSolver The linear solver employed
 * @param pNewConvergenceCriteria The convergence criteria employed
 * @param pNewBuilderAndSolver The builder-and-solver employed
 * @param MaxIterations The maximum number of iterations
 * @param CalculateReactions The flag for the reaction calculation
 * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF
 * @param MoveMeshFlag The flag that allows to move the mesh
 * @param ThisParameters The strategy configuration; validated against GetDefaultParameters()
 * @param pMyProcesses The list of processes to run during the solve (may be nullptr)
 * @param pPostProcesses The list of post-processes to run (may be nullptr)
 */
ResidualBasedNewtonRaphsonContactStrategy(
    ModelPart& rModelPart,
    typename TSchemeType::Pointer pScheme,
    typename TLinearSolver::Pointer pNewLinearSolver,
    typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
    typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
    IndexType MaxIterations = 30,
    bool CalculateReactions = false,
    bool ReformDofSetAtEachStep = false,
    bool MoveMeshFlag = false,
    Parameters ThisParameters = Parameters(R"({})"),
    ProcessesListType pMyProcesses = nullptr,
    ProcessesListType pPostProcesses = nullptr
    )
    : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ),
    mThisParameters(ThisParameters),
    mpMyProcesses(pMyProcesses),
    mpPostProcesses(pPostProcesses)
{
    KRATOS_TRY;

    // Cache the criteria echo level so it can be silenced and restored
    // around the inner semi-smooth loop in SolveSolutionStep().
    mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel();

    Parameters default_parameters = GetDefaultParameters();
    mThisParameters.ValidateAndAssignDefaults(default_parameters);

    KRATOS_CATCH("");
}
/**
 * Destructor. Defaulted: all members release themselves (smart pointers).
 */
~ResidualBasedNewtonRaphsonContactStrategy() override
= default;
//******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************//
//***********************************************************************************//
/**
 * @brief Operation to predict the solution ... if it is not called a trivial predictor is used in which the
 * values of the solution step of interest are assumed equal to the old values
 * @details Resets the weighted gap (and slip, for frictional problems) on the
 * "Contact" sub-model-part, recomputes the gap, then advances the node
 * coordinates by the displacement (increment) as an initial guess.
 */
void Predict() override
{
    KRATOS_TRY

    // Auxiliar zero array
    const array_1d<double, 3> zero_array = ZeroVector(3);

    // Set to zero the weighted gap
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
    const bool frictional = r_model_part.Is(SLIP);

    // We predict contact pressure in case of contact problem
    if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
        VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array);
        if (frictional) {
            VariableUtils().SetVectorVar(WEIGHTED_SLIP, zero_array, nodes_array);
        }

        // Compute the current gap
        ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));

        // We predict a contact pressure
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
        const std::size_t step = r_process_info[STEP];

        if (step == 1) {
            // First step: no previous displacement exists, use the full value.
            #pragma omp parallel for
            for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                auto it_node = nodes_array.begin() + i;
                noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT);
            }
        } else {
            // Later steps: advance by the displacement increment only.
            #pragma omp parallel for
            for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                auto it_node = nodes_array.begin() + i;
                noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1));
            }
        }
    }

    // BaseType::Predict();  // NOTE: May cause problems in dynamics!!!
    //
    // // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated
    // ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    // NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes();
    //
    // // We predict contact pressure in case of contact problem
    // if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) {
    //     VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array);
    //
    //     // Compute the current gap
    //     ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact"));
    //
    //     // We predict a contact pressure
    //     ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    //     const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY];
    //
    //     // We iterate over the nodes
    //     bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true;
    //
    //     #pragma omp parallel for
    //     for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
    //         auto it_node = nodes_array.begin() + i;
    //
    //         const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
    //
    //         const double penalty = it_node->Has(INITIAL_PENALTY) ? it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter;
    //
    //         if (current_gap < 0.0) {
    //             it_node->Set(ACTIVE, true);
    //             if (is_components) {
    //                 it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap;
    //             } else {
    //                 const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL);
    //                 it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal;
    //             }
    //         }
    //     }
    // }

    KRATOS_CATCH("")
}
/**
 * @brief Initialization of member variables and prior operations
 * @details Delegates to the base strategy, resets the finalize guard and
 * seeds NL_ITERATION_NUMBER so convergence criteria see a valid counter.
 */
void Initialize() override
{
    KRATOS_TRY;

    BaseType::Initialize();
    mFinalizeWasPerformed = false;

    // Initializing NL_ITERATION_NUMBER
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    r_process_info[NL_ITERATION_NUMBER] = 1;

    KRATOS_CATCH("");
}
/**
 * @brief The problem of interest is solved.
 * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
 * SolveSolutionStep() and FinalizeSolutionStep().
 * All those functions can otherwise be called separately.
 * @return Always 0.0; the residual norm is not reported by this override.
 */
double Solve() override
{
    this->Initialize();
    this->InitializeSolutionStep();
    this->Predict();
    this->SolveSolutionStep();
    this->FinalizeSolutionStep();

    // TODO: Add something if necessary
    return 0.0;
}
/**
 * @brief Performs all the required operations that should be done (for each step)
 * before solving the solution step.
 * @details A member variable should be used as a flag to make sure this function is called only once per step.
 */
void InitializeSolutionStep() override
{
    BaseType::InitializeSolutionStep();
    // Re-arm the guard so FinalizeSolutionStep() runs once for this step.
    mFinalizeWasPerformed = false;
}
/**
 * @brief Performs all the required operations that should be done (for each step)
 * after solving the solution step.
 * @details Guarded by mFinalizeWasPerformed so repeated calls are no-ops.
 */
void FinalizeSolutionStep() override
{
    KRATOS_TRY;

    if (mFinalizeWasPerformed == false) {
        BaseType::FinalizeSolutionStep();

        // To avoid compute twice the FinalizeSolutionStep
        mFinalizeWasPerformed = true;
    }

    KRATOS_CATCH("");
}
/**
 * @brief Solves the current step.
 * @details This function returns true if a solution has been found, false otherwise.
 * Without the INTERACTION flag a simplified semi-smooth outer loop repeats
 * BaseSolveSolutionStep() up to "inner_loop_iterations" times, checking
 * PostCriteria after each pass; with INTERACTION a single base solve is run.
 * Optionally falls back to AdaptativeStep() when not converged.
 */
bool SolveSolutionStep() override
{
    KRATOS_TRY;

    // bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations
    // bool is_converged = BaseSolveSolutionStep(); // Direct solution
    bool is_converged = false;

    // Getting model part
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();

    if (r_model_part.IsNot(INTERACTION)) {
        // We get the system
        TSystemMatrixType& A = *BaseType::mpA;
        TSystemVectorType& Dx = *BaseType::mpDx;
        TSystemVectorType& b = *BaseType::mpb;

        // We get the process info
        ProcessInfo& r_process_info = r_model_part.GetProcessInfo();

        int inner_iteration = 0;
        while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) {
            ++inner_iteration;

            if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
                std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << inner_iteration;;
            }

            // We solve one loop
            r_process_info[NL_ITERATION_NUMBER] = 1;
            r_process_info[INNER_LOOP_ITERATION] = inner_iteration;
            is_converged = BaseSolveSolutionStep();

            // We check the convergence (criteria silenced during the check,
            // then restored to the cached echo level)
            BaseType::mpConvergenceCriteria->SetEchoLevel(0);
            is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b);
            BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel);

            if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) {
                if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl;
                else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl;
            }
        }
    } else {
        // We compute the base loop
        r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1;
        is_converged = BaseSolveSolutionStep();
    }

    if (mThisParameters["adaptative_strategy"].GetBool()) {
        if (!is_converged) {
            is_converged = AdaptativeStep();
        }
    }

    return is_converged;

    KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Parameters mThisParameters; /// The configuration parameters
// ADAPTATIVE STRATEGY PARAMETERS
bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has been already permformed
ProcessesListType mpMyProcesses; /// The processes list
ProcessesListType mpPostProcesses; /// The post processes list
// OTHER PARAMETERS
int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria
///@}
///@name Protected Operators
///@{
/**
 * @brief Solves the current step.
 * @details This function returns true if a solution has been found, false otherwise.
 * Reimplementation of the Newton-Raphson iteration cycle with extra
 * inverted-geometry checks (adaptative strategy) after each database update.
 * @return True when the convergence criteria were satisfied.
 */
bool BaseSolveSolutionStep()
{
    KRATOS_TRY;

    // Pointers needed in the solution
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    typename TSchemeType::Pointer pScheme = BaseType::GetScheme();
    typename TBuilderAndSolverType::Pointer pBuilderAndSolver = BaseType::GetBuilderAndSolver();

    TSystemMatrixType& A = *BaseType::mpA;
    TSystemVectorType& Dx = *BaseType::mpDx;
    TSystemVectorType& b = *BaseType::mpb;

    //initializing the parameters of the Newton-Raphson cicle
    IndexType iteration_number = 1;
    r_process_info[NL_ITERATION_NUMBER] = iteration_number;

    bool is_converged = false;
    bool residual_is_updated = false;
    pScheme->InitializeNonLinIteration(r_model_part, A, Dx, b);
    is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);

    // We do a geometry check before solve the system for first time
    if (mThisParameters["adaptative_strategy"].GetBool()) {
        if (CheckGeometryInverted()) {
            KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE"  << std::endl;
            r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
            return false;
        }
    }

    // Function to perform the building and the solving phase.
    if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) {
        // Full rebuild: LHS and RHS from scratch
        TSparseSpace::SetToZero(A);
        TSparseSpace::SetToZero(Dx);
        TSparseSpace::SetToZero(b);

        pBuilderAndSolver->BuildAndSolve(pScheme, r_model_part, A, Dx, b);
    } else {
        // Reuse the stiffness matrix; rebuild only the RHS
        TSparseSpace::SetToZero(Dx); //Dx=0.00;
        TSparseSpace::SetToZero(b);

        pBuilderAndSolver->BuildRHSAndSolve(pScheme, r_model_part, A, Dx, b);
    }

    // Debugging info
    BaseType::EchoInfo(iteration_number);

    // Updating the results stored in the database
    UpdateDatabase(A, Dx, b, StrategyBaseType::MoveMeshFlag());

    // We now check the geometry
    if (mThisParameters["adaptative_strategy"].GetBool()) {
        if (CheckGeometryInverted()) {
            KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
            r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
            return false;
        }
    }

    pScheme->FinalizeNonLinIteration(r_model_part, A, Dx, b);

    if (is_converged) {
        //initialisation of the convergence criteria
        BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);

        if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
            TSparseSpace::SetToZero(b);

            pBuilderAndSolver->BuildRHS(pScheme, r_model_part, b);
        }

        is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
    }

    // Iteration Cicle... performed only for NonLinearProblems
    while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) {
        //setting the number of iteration
        r_process_info[NL_ITERATION_NUMBER] = iteration_number;

        pScheme->InitializeNonLinIteration(r_model_part, A, Dx, b);

        is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);

        //call the linear system solver to find the correction mDx for the
        //it is not called if there is no system to solve
        if (SparseSpaceType::Size(Dx) != 0) {
            if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) {
                if( BaseType::GetKeepSystemConstantDuringIterations() == false) {
                    //A = 0.00;
                    TSparseSpace::SetToZero(A);
                    TSparseSpace::SetToZero(Dx);
                    TSparseSpace::SetToZero(b);

                    pBuilderAndSolver->BuildAndSolve(pScheme, r_model_part, A, Dx, b);
                }
                else {
                    TSparseSpace::SetToZero(Dx);
                    TSparseSpace::SetToZero(b);

                    pBuilderAndSolver->BuildRHSAndSolve(pScheme, r_model_part, A, Dx, b);
                }
            }
            else {
                TSparseSpace::SetToZero(Dx);
                TSparseSpace::SetToZero(b);

                pBuilderAndSolver->BuildRHSAndSolve(pScheme, r_model_part, A, Dx, b);
            }
        } else {
            KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! " << std::endl;
        }

        // Debugging info
        BaseType::EchoInfo(iteration_number);

        // Updating the results stored in the database
        UpdateDatabase(A, Dx, b, StrategyBaseType::MoveMeshFlag());

        // We now check the geometry
        if (mThisParameters["adaptative_strategy"].GetBool()) {
            if (CheckGeometryInverted()) {
                KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl;
                r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing
                return false;
            }
        }

        pScheme->FinalizeNonLinIteration(r_model_part, A, Dx, b);

        residual_is_updated = false;

        if (is_converged) {
            if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) {
                TSparseSpace::SetToZero(b);

                pBuilderAndSolver->BuildRHS(pScheme, r_model_part, b);
                residual_is_updated = true;
                //std::cout << "mb is calculated" << std::endl;
            }

            is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, pBuilderAndSolver->GetDofSet(), A, Dx, b);
        }
    }

    // Plots a warning if the maximum number of iterations is exceeded
    if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0)
        MaxIterationsExceeded();

    // Recalculate residual if needed
    // (note that some convergence criteria need it to be recalculated)
    if (residual_is_updated == false) {
        // NOTE:
        // The following part will be commented because it is time consuming
        // and there is no obvious reason to be here. If someone need this
        // part please notify the community via mailing list before uncommenting it.
        // Pooyan.

        //    TSparseSpace::SetToZero(mb);
        //    pBuilderAndSolver->BuildRHS(pScheme, r_model_part, mb);
    }

    // Calculate reactions if required
    if (BaseType::mCalculateReactionsFlag)
        pBuilderAndSolver->CalculateReactions(pScheme, r_model_part, A, Dx, b);

    return is_converged;

    KRATOS_CATCH("");
}
/**
 * @brief Solves the current step by recursively splitting the time step.
 * @details When the standard solve does not converge, DELTA_TIME is reduced
 * (see SplitTimeStep) and the resulting smaller sub-steps are solved one by
 * one until either convergence is reached or "max_number_splits" is exceeded.
 * The original DELTA_TIME is restored before returning.
 * @return True if convergence was achieved in some split level, false otherwise
 */
bool AdaptativeStep()
{
    KRATOS_TRY;
    bool is_converged = false;
    // Warn the user when no python processes/post-processes were registered,
    // since BCs/loads will then not be recomputed for the new (smaller) steps
    if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
        KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl;
    if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0)
        KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl;
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later
    int split_number = 0;
    // We iterate until we reach the convergence or we split more than desired
    while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) {
        // Splitting the time step as a way to try to improve the convergence
        split_number += 1;
        double aux_delta_time, current_time;
        const double aux_time = SplitTimeStep(aux_delta_time, current_time); // aux_time: target time to reach
        current_time += aux_delta_time;
        bool inside_the_split_is_converged = false;
        IndexType inner_iteration = 0;
        // Advance with the reduced DELTA_TIME until the original target time is reached
        while (current_time <= aux_time) {
            inner_iteration += 1;
            r_process_info[STEP] += 1;
            if (inner_iteration == 1) {
                // First sub-step: roll the database back to the last converged state
                if (StrategyBaseType::MoveMeshFlag())
                    UnMoveMesh();
                NodesArrayType& nodes_array = r_model_part.Nodes();
                #pragma omp parallel for
                for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
                    auto it_node = nodes_array.begin() + i;
                    it_node->OverwriteSolutionStepData(1, 0);
                    // it_node->OverwriteSolutionStepData(2, 1);
                }
                r_process_info.SetCurrentTime(current_time); // Reduces the time step
                FinalizeSolutionStep();
            } else {
                // Subsequent sub-steps: advance the solution-step database as usual
                NodesArrayType& nodes_array = r_model_part.Nodes();
                #pragma omp parallel for
                for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i)
                    (nodes_array.begin() + i)->CloneSolutionStepData();
                r_process_info.CloneSolutionStepInfo();
                r_process_info.ClearHistory(r_model_part.GetBufferSize());
                r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step
            }
            // We execute the processes before the non-linear iteration
            if (mpMyProcesses != nullptr)
                mpMyProcesses->ExecuteInitializeSolutionStep();
            if (mpPostProcesses != nullptr)
                mpPostProcesses->ExecuteInitializeSolutionStep();
            // In order to initialize again everything
            BaseType::mInitializeWasPerformed = false;
            mFinalizeWasPerformed = false;
            // We repeat the solve with the new DELTA_TIME
            this->Initialize();
            this->InitializeSolutionStep();
            this->Predict();
            inside_the_split_is_converged = BaseType::SolveSolutionStep();
            this->FinalizeSolutionStep();
            // We execute the processes after the non-linear iteration
            if (mpMyProcesses != nullptr)
                mpMyProcesses->ExecuteFinalizeSolutionStep();
            if (mpPostProcesses != nullptr)
                mpPostProcesses->ExecuteFinalizeSolutionStep();
            if (mpMyProcesses != nullptr)
                mpMyProcesses->ExecuteBeforeOutputStep();
            if (mpPostProcesses != nullptr)
                mpPostProcesses->PrintOutput();
            if (mpMyProcesses != nullptr)
                mpMyProcesses->ExecuteAfterOutputStep();
            current_time += aux_delta_time;
        }
        // NOTE(review): only the convergence flag of the LAST sub-step is kept
        // here -- confirm that earlier non-converged sub-steps are acceptable
        if (inside_the_split_is_converged)
            is_converged = true;
    }
    // Plots a warning if the maximum number of iterations and splits are exceeded
    if (is_converged == false)
        MaxIterationsAndSplitsExceeded();
    // Restoring original DELTA_TIME
    r_process_info[DELTA_TIME] = original_delta_time;
    return is_converged;
    KRATOS_CATCH("");
}
/**
 * @brief Updates the solution database after the linear solve
 * @param A The LHS matrix
 * @param Dx The increment of solution after solving system
 * @param b The RHS vector
 * @param MoveMesh The flag that tells if the mesh should be moved
 */
void UpdateDatabase(
    TSystemMatrixType& A,
    TSystemVectorType& Dx,
    TSystemVectorType& b,
    const bool MoveMesh
    ) override
{
    // Simply delegate to the base strategy implementation
    BaseType::UpdateDatabase(A, Dx, b, MoveMesh);
}
/**
 * @brief This method checks that no element of the model part is inverted
 * @details An element is considered inverted when the determinant of its
 * jacobian at the first integration point, or the determinant of the
 * deformation gradient at any integration point, is negative
 * @return True if an inverted element was found, false otherwise
 */
bool CheckGeometryInverted()
{
    ModelPart& r_model_part = StrategyBaseType::GetModelPart();
    ProcessInfo& r_process_info = r_model_part.GetProcessInfo();
    ElementsArrayType& elements_array = r_model_part.Elements();
    // NOT OMP: we return as soon as the first inverted element is found
    for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) {
        auto it_elem = elements_array.begin() + i;
        auto& geom = it_elem->GetGeometry();
        if (geom.DeterminantOfJacobian(0) < 0.0) {
            if (mConvergenceCriteriaEchoLevel > 0) {
                KRATOS_WATCH(it_elem->Id())
                KRATOS_WATCH(geom.DeterminantOfJacobian(0))
            }
            return true;
        }
        // We check now the deformation gradient
        std::vector<Matrix> deformation_gradient_matrices;
        it_elem->GetValueOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info);
        for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) {
            const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]);
            if (det_f < 0.0) {
                if (mConvergenceCriteriaEchoLevel > 0) {
                    KRATOS_WATCH(it_elem->Id())
                    KRATOS_WATCH(det_f)
                }
                return true;
            }
        }
    }
    // No inverted element found. (The original implementation kept an
    // "inverted_element" local that was never written -- removed as dead code.)
    return false;
}
/**
* @brief Here the time step is splitted
* @param AuxDeltaTime The new delta time to be considered
* @param CurrentTime The current time
* @return The destination time
*/
double SplitTimeStep(
double& AuxDeltaTime,
double& CurrentTime
)
{
KRATOS_TRY;
const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME];
CurrentTime = aux_time - AuxDeltaTime;
StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one
AuxDeltaTime /= mThisParameters["split_factor"].GetDouble();
StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time
CoutSplittingTime(AuxDeltaTime, aux_time);
return aux_time;
KRATOS_CATCH("");
}
/**
* This method moves bak the mesh to the previous position
*/
void UnMoveMesh()
{
KRATOS_TRY;
if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false)
KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl;
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) {
auto it_node = nodes_array.begin() + i;
noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates();
noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1);
}
KRATOS_CATCH("");
}
/**
 * @brief This method returns the default parameters in order to avoid code duplication
 * @return Returns the default parameters
 */
Parameters GetDefaultParameters()
{
    // NOTE: the raw string below is parsed at runtime as JSON; keep it valid
    Parameters default_parameters = Parameters(R"(
{
"adaptative_strategy" : false,
"split_factor" : 10.0,
"max_number_splits" : 3,
"inner_loop_iterations" : 5
})" );
    return default_parameters;
}
/**
* @brief This method prints information after solving the problem
*/
void CoutSolvingProblem()
{
if (mConvergenceCriteriaEchoLevel != 0) {
std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl;
}
}
/**
 * @brief This method prints a banner after the increment of time is split
 * @param AuxDeltaTime The new time step to be considered
 * @param AuxTime The destination time
 */
void CoutSplittingTime(
    const double AuxDeltaTime,
    const double AuxTime
    )
{
    const bool print_banner = mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0;
    if (print_banner) {
        const double restored_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME];
        std::cout.precision(4);
        std::cout << "|----------------------------------------------------|" << std::endl;
        std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl;
        std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << restored_time << " |" << std::endl;
        std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl;
        std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl;
        std::cout << "|----------------------------------------------------|" << std::endl;
    }
}
/**
 * @brief This method prints a banner when the maximum number of iterations is exceeded
 * @details Only rank 0 prints, and only when the convergence echo level is positive
 */
void MaxIterationsExceeded() override
{
    const bool print_banner = mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0;
    if (print_banner) {
        std::cout << "|----------------------------------------------------|" << std::endl;
        std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
        std::cout << "|----------------------------------------------------|" << std::endl;
    }
}
/**
 * @brief This method prints a banner when both the maximum number of iterations
 * and the maximum number of time-step splits are exceeded
 * @details Only rank 0 prints, and only when the convergence echo level is positive
 */
void MaxIterationsAndSplitsExceeded()
{
    const bool print_banner = mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0;
    if (print_banner) {
        std::cout << "|----------------------------------------------------|" << std::endl;
        std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl;
        std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl;
        std::cout << "|----------------------------------------------------|" << std::endl;
    }
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@{
/**
 * Copy constructor.
 * NOTE(review): this constructor copies no member state and does not delegate
 * to the base-class copy constructor -- confirm this is intentional, otherwise
 * a copied strategy is left default-initialized.
 */
ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other)
{
};
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedNewtonRaphsonContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
|
GB_binop__bclr_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bclr_uint16)
// A.*B function (eWiseMult):    GB (_AemultB_01__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__bclr_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale):      GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint16)
// C=scalar+B GB (_bind1st__bclr_uint16)
// C=scalar+B' GB (_bind1st_tran__bclr_uint16)
// C=A+scalar GB (_bind2nd__bclr_uint16)
// C=A'+scalar GB (_bind2nd_tran__bclr_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITCLR (x, y, uint16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled (#if 0): this kernel is only generated for the accumulator ops
// listed below; BITCLR is not one of them, so the body is compiled out.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Loop body is supplied by the included template; the GB_* macros defined at
// the top of this file bind it to the BITCLR/uint16 operator.
GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// B is traversed using the precomputed ek_slice task partition; the numeric
// work is in the included template.
GrB_Info GB (_Cdense_accumB__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bclr_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: a duplicate "return (GrB_SUCCESS) ;" used to follow the block
    // above; it was unreachable (the inner return always executes) and has
    // been removed.
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): the colscale kernel is not generated for the BITCLR
// operator, so this body is compiled out.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled (#if 0): the rowscale kernel is not generated for the BITCLR
// operator, so this body is compiled out.  The placeholder name was
// "GB ((node))" -- a generator typo for "GB ((none))" (compare the disabled
// colscale kernel above); fixed here for consistency.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// The add template handles all sparsity combinations of C, M, A and B; the
// GB_WERK workspaces are freed by GB_FREE_WORK before returning.
GrB_Info GB (_AaddB__bclr_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// General eWiseMult kernel; the numeric work is in the included meta file.
GrB_Info GB (_AemultB_01__bclr_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// GB_BINOP_FLIP is 1 for BITCLR (non-commutative, no flipped variant), so the
// flipxy branch below is compiled in and selects the argument order at runtime.
GrB_Info GB (_AemultB_02__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// M is traversed using the precomputed ek_slice task partition.
GrB_Info GB (_AemultB_03__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bclr_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Entries where the bitmap Bb is zero (if Bb is non-NULL) are skipped.
GrB_Info GB (_bind1st__bclr_uint16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Entries where the bitmap Ab is zero (if Ab is non-NULL) are skipped.
GrB_Info GB (_bind2nd__bclr_uint16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = Ax [pA] ;  \
    Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ;        \
}
GrB_Info GB (_bind1st_tran__bclr_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function (generated
    // boilerplate; redundant here since GB_ATYPE is already uint16_t)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = Ax [pA] ;  \
    Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ;        \
}
GrB_Info GB (_bind2nd_tran__bclr_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
TemporalMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/TemporalMaxPooling.c"
#else
/* Forward pass of 1-D temporal max pooling.
   Input: 2D tensor (nInputFrame x frameSize); output has
   (nInputFrame - kW) / dW + 1 frames.  `indices` records, per output
   element, the offset (within the kW window) of the winning input frame,
   used by updateGradInput.
   BUG FIX: `x` is written inside the OpenMP-parallel loop over `y` but was
   not listed in the private() clause, so all threads raced on the shared
   `x` -- the window scan `ip[x*framesize+y]` could read with another
   thread's `x`.  `x` is now private along with `y`. */
static int nn_(TemporalMaxPooling_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  luaL_argcheck(L, input->nDimension == 2, 2, "2D tensor expected");
  luaL_argcheck(L, input->size[0] >= kW, 2, "input sequence smaller than kernel size");
  // sizes
  long niframe = input->size[0];
  long framesize = input->size[1];
  long noframe = (niframe - kW) / dW + 1;
  // get contiguous input
  input = THTensor_(newContiguous)(input);
  // resize output
  THTensor_(resize2d)(output, noframe, framesize);
  // indices will contain index locations for each output point
  THTensor_(resize2d)(indices, noframe, framesize);
  // get raw pointers
  real *input_data = THTensor_(data)(input);
  real *output_data = THTensor_(data)(output);
  real *indices_data = THTensor_(data)(indices);
  long t, x, y;
  for(t = 0; t < noframe; t++)
  {
    real *ip = input_data + t*framesize*dW;
    real *op = output_data + t*framesize;
    real *xp = indices_data + t*framesize;
    /* x must be private too: it is the inner window index, written by
       every thread of the y loop */
#pragma omp parallel for private(x, y)
    for(y = 0; y < framesize; y++)
    {
      // compute local max:
      long maxindex = -1;
      real maxval = -THInf;
      for(x = 0; x < kW; x++)
      {
        real val = ip[x*framesize+y];
        if (val > maxval)
        {
          maxval = val;
          maxindex = x;
        }
      }
      // set output to local max
      op[y] = maxval;
      xp[y] = (real)maxindex;
    }
  }
  // cleanup
  THTensor_(free)(input);
  return 1;
}
/* Backward pass of 1-D temporal max pooling: routes each output gradient
   back to the input position that produced the max, as recorded in
   `indices` by updateOutput.  Distinct columns write distinct input
   elements, so the column loop is safe to parallelize. */
static int nn_(TemporalMaxPooling_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  THTensor *indices = luaT_getfieldcheckudata(L, 1, "indices", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);
  /* work on a contiguous copy of gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  /* gradInput starts from zero and is accumulated into */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);
  /* sizes */
  long nframe = gradOutput->size[0];
  long framesize = gradOutput->size[1];
  /* raw pointers */
  real *gi_data = THTensor_(data)(gradInput);
  real *go_data = THTensor_(data)(gradOutput);
  real *idx_data = THTensor_(data)(indices);
  long frame, col;
  for(frame = 0; frame < nframe; frame++)
  {
    real *gip = gi_data + frame*framesize*dW;
    real *gop = go_data + frame*framesize;
    real *xp = idx_data + frame*framesize;
#pragma omp parallel for private(col)
    for(col = 0; col < framesize; col++)
    {
      /* position of the max within the pooling window for this column */
      long winner = (long)xp[col];
      gip[winner*framesize + col] += gop[col];
    }
  }
  /* cleanup */
  THTensor_(free)(gradOutput);
  return 1;
}
/* Lua registration table: maps method names to the C implementations above */
static const struct luaL_Reg nn_(TemporalMaxPooling__) [] = {
  {"TemporalMaxPooling_updateOutput", nn_(TemporalMaxPooling_updateOutput)},
  {"TemporalMaxPooling_updateGradInput", nn_(TemporalMaxPooling_updateGradInput)},
  {NULL, NULL}
};
/* Registers the functions above under the "nn" field of the Tensor
   metatable, then restores the Lua stack */
static void nn_(TemporalMaxPooling_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(TemporalMaxPooling__), "nn");
  lua_pop(L,1);
}
|
GB_unaryop__lnot_uint64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint64_uint8
// op(A') function: GB_tran__lnot_uint64_uint8
// C type: uint64_t
// A type: uint8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise: Cx [p] = !((uint64_t) Ax [p] != 0), via the GB_CAST_OP macro.
GrB_Info GB_unop__lnot_uint64_uint8
(
    uint64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose loop is supplied by the included template (phase 2 only).
GrB_Info GB_tran__lnot_uint64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
task_late_fulfill.c | // RUN: %libomp-compile -fopenmp-version=50 && env OMP_NUM_THREADS='3' \
// RUN: %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// Checked gcc 10.1 still does not support detach clause on task construct.
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
// clang supports detach clause since version 11.
// UNSUPPORTED: clang-10, clang-9, clang-8, clang-7
// icc compiler does not support detach clause.
// UNSUPPORTED: icc
#include "callback.h"
#include <omp.h>
int main() {
#pragma omp parallel
#pragma omp master
  {
    omp_event_handle_t event;
    omp_event_handle_t *f_event;
    // Undeferred (if(0)) detachable task: runs immediately on this thread and
    // publishes the event handle through the shared pointer.  The task stays
    // "detached" (incomplete) until omp_fulfill_event is called below.
#pragma omp task detach(event) depend(out : f_event) shared(f_event) if (0)
    {
      printf("task 1\n");
      f_event = &event;
    }
    // Depends on task 1 through the f_event dependency, but task 1 is not
    // complete until its event is fulfilled, so task 2 must wait.
#pragma omp task depend(in : f_event)
    { printf("task 2\n"); }
    printf("calling omp_fulfill_event\n");
    // Late fulfill: the detached task body has already finished executing;
    // fulfilling the event now completes it (ompt_task_late_fulfill below)
    // and releases task 2.
    omp_fulfill_event(*f_event);
#pragma omp taskwait
  }
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
// CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]],
// CHECK-SAME: parent_task_frame.exit=[[NULL]],
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: requested_team_size=3,
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// The following is to match the taskwait task created in __kmpc_omp_wait_deps
// this should go away, once codegen for "detached if(0)" is fixed
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: parent_task_frame.exit=0x{{[0-f]+}},
// CHECK-SAME: parent_task_frame.reenter=0x{{[0-f]+}},
// CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]],
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: second_task_id=[[TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_switch=7
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=[[IMPLICIT_TASK_ID]],
// CHECK-SAME: prior_task_status=ompt_task_detach=4
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_schedule:
// CHECK-SAME: first_task_id=[[TASK_ID]],
// CHECK-SAME: second_task_id=18446744073709551615,
// CHECK-SAME: prior_task_status=ompt_task_late_fulfill=6
|
pi1.c | #include <omp.h>
static long num_steps = 100000;
double step;
#define NUM_THREADS 2
/*
 * Compute pi by numerically integrating 4/(1+x^2) over [0,1] using an SPMD
 * OpenMP pattern: each thread handles a round-robin (cyclic) subset of the
 * num_steps rectangles and folds its partial sum into the shared result.
 *
 * Fixes applied to the original:
 *  - missing ';' after omp_set_num_threads(...) and after 'pi += sum'
 *  - 'id' was used but never declared
 *  - loop index 'i' was shared across threads (data race); it is now private
 *  - the partial sum must be scaled by the rectangle width: 'pi += sum * step'
 */
void main ()
{
    int i, id;
    double x, sum, pi = 0.0;
    step = 1.0 / (double) num_steps;
    omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private(i, id, x, sum)
    {
        id = omp_get_thread_num();
        /* Cyclic distribution: thread id takes iterations id, id+N, id+2N, ... */
        for (i = id, sum = 0.0; i < num_steps; i = i + NUM_THREADS) {
            x = (i + 0.5) * step;          /* midpoint of rectangle i */
            sum += 4.0 / (1.0 + x * x);
        }
        /* Serialize the accumulation into the shared total. */
#pragma omp critical
        pi += sum * step;
    }
}
test_nest_lock_parallel.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
// OMPT test for nest-lock callbacks.  The CHECK lines are FileCheck
// expectations and must stay byte-identical; only explanatory comments added.
int main()
{
  omp_nest_lock_t nest_lock;
  omp_init_nest_lock(&nest_lock);
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      // Master takes the nest lock (nesting count 1).
      omp_set_nest_lock(&nest_lock);
      print_fuzzy_address(1);
    }
#pragma omp barrier
    // Master's test succeeds as a nested acquire (count 2);
    // the other thread's test fails because it is not the owner.
    omp_test_nest_lock(&nest_lock); //should fail for non-master
    print_fuzzy_address(2);
#pragma omp barrier
#pragma omp master
    {
      // Two releases: first drops the nesting count to 1, second to 0
      // (release_nest_lock_prev then release_nest_lock_last below).
      omp_unset_nest_lock(&nest_lock);
      print_fuzzy_address(3);
      omp_unset_nest_lock(&nest_lock);
      print_fuzzy_address(4);
    }
  }
  omp_destroy_nest_lock(&nest_lock);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID:[0-9]+]], hint=0, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_first: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint=0, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_prev: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_nest_lock_last: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_destroy_nest_lock: wait_id=[[WAIT_ID]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_wait_nest_lock: wait_id=[[WAIT_ID]], hint=0, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK-NOT: {{^}}[[THREAD_ID]]: ompt_event_acquired_nest_lock_next: wait_id=[[WAIT_ID]]
  // CHECK-NEXT: {{^}}[[THREAD_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  return 0;
}
|
main.h | #include <algorithm>
#include <iostream>
#include <queue>
#include <sstream>
#include <stack>
#include <vector>
#include <set>
#include <map>
#include <unordered_map>
#include <utility>
#include <random>
#include <chrono>
#include <tuple>
#include <fstream>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <string.h>
#include <omp.h>
#include <util/log/log.h>
#include <util/serialization/pretty_print.h>
#include <util/util.h>
#include <util/md5.h>
#include "bucket.h"
//#define DUMP_Hs
//#define DUMP_K
#define PRIME 251231
using namespace std;
#define TEST_SIZE 100 // for partialAND exps
typedef long long lol;
typedef int vertex;
typedef uint32_t edge;
//typedef lol vertex; // for big graph runs, vertices are 32 bytes
//typedef lol edge; // for big graph runs, edges are 32 bytes
typedef chrono::duration<double> tms;
typedef tuple<vertex, vertex> couple;
typedef tuple<vertex, vertex, vertex> triple;
struct triangle_id {
tuple<vertex, vertex, vertex> triple;
vertex id;
bool operator==(const triangle_id &other) const {
if (triple == other.triple)
return 1;
else {
return 0;
}
}
triangle_id() {
triple = make_tuple(-1, -1, -1);
id = -1;
}
};
// Hash specializations so triangle_id, couple and triple can be used as
// keys in unordered containers.  All use a simple base-PRIME polynomial
// over the tuple components (collisions possible but cheap to compute).
namespace std {
template<>
struct hash<triangle_id> {
    std::size_t operator()(const triangle_id &t) const {
        // Hash only the vertex triple, consistent with operator==.
        return (get<0>(t.triple) * PRIME * PRIME + get<1>(t.triple)) * PRIME + get<2>(t.triple);
    }
};
template<>
struct hash<couple> {
    std::size_t operator()(const couple &c) const {
        return (get<0>(c) * PRIME + get<1>(c));
    }
};
template<>
struct hash<triple> {
    std::size_t operator()(const triple &c) const {
        return (get<0>(c) * PRIME * PRIME + get<1>(c) * PRIME + get<2>(c));
    }
};
}
typedef vector<vector<vertex> > Graph;
// Fold the hash of v into seed (the well-known boost::hash_combine formula;
// 0x9e3779b9 is derived from the golden ratio to spread bits).
template<class T>
inline void hash_combine(std::size_t &seed, const T &v) {
    std::hash<T> hasher;
    seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
// Generic hash for std::pair so pairs can key unordered containers;
// combines the two element hashes via ::hash_combine above.
namespace std {
template<typename S, typename T>
struct hash<pair<S, T>> {
    inline size_t operator()(const pair<S, T> &v) const {
        size_t seed = 0;
        ::hash_combine(seed, v.first);
        ::hash_combine(seed, v.second);
        return seed;
    }
};
}
// Position of vertex i inside a's ordered adjacency list, or -1 if absent.
inline vertex findInd(vertex a, vertex i, vertex *ordered_adj, edge *ordered_xadj) {
    vertex pos = ordered_xadj[a];
    const vertex last = ordered_xadj[a + 1];
    while (pos < last) {
        if (ordered_adj[pos] == i)
            return pos;
        ++pos;
    }
    return -1;
}
// Degree-based total order on vertices: smaller degree first,
// ties broken by vertex id.
inline bool isSmaller(edge *xadj, vertex u, vertex v) {
    const vertex du = xadj[u + 1] - xadj[u];
    const vertex dv = xadj[v + 1] - xadj[v];
    if (du != dv)
        return du < dv;
    return u < v;
}
typedef tuple<int, int> eda;
// Comparator: order (id, value) pairs by descending second component.
inline bool kksort(eda i, eda j) {
    return get<1>(i) > get<1>(j);
}
// Write the nVtx values of T to "<vfile>_FINAL_K" (H == -1) or
// "<vfile>_H_<H>", one value per line.
// Fix: the original dereferenced the fopen result without checking it,
// crashing on an unwritable path; failures are now reported and skipped.
inline void print_Ks(edge nVtx, vertex *T, const char *vfile, int H = -1) {
    string st(vfile);
    if (H == -1)
        st += "_FINAL_K";
    else
        st += "_H_" + to_string(H);
    FILE *pp = fopen(st.c_str(), "w");
    if (pp == NULL) {
        fprintf(stderr, "print_Ks: cannot open %s for writing\n", st.c_str());
        return;
    }
    for (edge i = 0; i < nVtx; i++)
        fprintf(pp, "%d\n", T[i]);
    fclose(pp);
}
// Read sz integers from file `fl` into a freshly allocated array *P;
// a stored value of -1 is mapped to 0.  Caller owns and must free *P.
// Fixes: unchecked malloc/fopen/fscanf.  The buffer is zero-initialized so
// that on any I/O failure callers still receive fully defined values.
inline void read_Ks(size_t sz, const char *fl, vertex **P) {
    string st(fl);
    *P = (vertex *) calloc(sz, sizeof(vertex));
    if (*P == NULL) {
        fprintf(stderr, "read_Ks: allocation of %zu entries failed\n", sz);
        return;
    }
    FILE *fp = fopen(st.c_str(), "r");
    if (fp == NULL) {
        fprintf(stderr, "read_Ks: cannot open %s\n", st.c_str());
        return;
    }
    vertex num;
    for (size_t i = 0; i < sz; i++) {
        if (fscanf(fp, "%d", &num) != 1)
            break;                        // short or malformed file: keep zeros
        (*P)[i] = (num == -1) ? 0 : num;  // -1 means "no value" -> 0
    }
    fclose(fp);
}
// Append the intersection of the sorted adjacency lists of u and v to
// `intersection` (classic two-pointer merge walk).
inline void intersection2(vertex *adj, edge *xadj, vertex u, vertex v, vector<vertex> &intersection) {
    vertex pu = xadj[u];
    vertex pv = xadj[v];
    const vertex endu = xadj[u + 1];
    const vertex endv = xadj[v + 1];
    while (pu < endu && pv < endv) {
        const vertex au = adj[pu];
        const vertex av = adj[pv];
        if (au == av) {
            intersection.push_back(au);
            ++pu;
            ++pv;
        } else if (au < av) {
            ++pu;
        } else {
            ++pv;
        }
    }
}
// Append the three-way intersection of the sorted adjacency lists of u, v
// and w to `intersection`.  Three-pointer merge: when the heads differ,
// every cursor whose value is below the current maximum is advanced.
inline void intersection3(vertex *adj, edge *xadj, vertex u, vertex v, vertex w, vector<vertex> &intersection) {
    vertex i = xadj[u];        // cursor into adj(u)
    vertex j = xadj[v];        // cursor into adj(v)
    vertex k = xadj[w];        // cursor into adj(w)
    vertex gu = xadj[u + 1];   // one-past-end of adj(u)
    vertex gv = xadj[v + 1];
    vertex gw = xadj[w + 1];
    while (i < gu && j < gv && k < gw) {
        vertex a = adj[i];
        vertex b = adj[j];
        vertex c = adj[k];
        if (a == b && a == c) {
            // common neighbor of all three
            intersection.push_back(a);
            i++;
            j++;
            k++;
        } else {
            // advance every list that is behind the maximum head value
            vertex m = max(a, max(b, c));
            if (a != m)
                i++;
            if (b != m)
                j++;
            if (c != m)
                k++;
        }
    }
}
// Build the degree-oriented graph from CSR (adj/xadj): each undirected edge
// {u,v} is kept only in the direction u -> v where isSmaller(xadj,u,v), i.e.
// from the lower-degree endpoint.  Outputs:
//  - ordered_adj/ordered_xadj : CSR of the oriented graph
//  - el  : list of kept edges as (u, v) tuples
//  - xel : per-vertex index into el (CSR-style offsets)
// All output arrays are assumed preallocated by the caller.
inline void createOrdered(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, couple *el, edge *xel, vertex *ordered_adj,
                          edge *ordered_xadj) {
    edge xi = 0;               // write cursor into xel
    vertex i = 0;              // write cursor into el
    xel[xi++] = 0;
    edge oxi = 0;              // write cursor into ordered_xadj
    vertex oi = 0;             // write cursor into ordered_adj
    ordered_xadj[oxi++] = 0;
    for (vertex u = 0; u < nVtx; u++) {
        for (auto j = xadj[u]; j < xadj[u + 1]; j++) {
            vertex v = adj[j];
            // keep the edge only from the "smaller" endpoint -> each
            // undirected edge appears exactly once
            if (isSmaller(xadj, u, v)) {
                ordered_adj[oi++] = v;
                couple c = make_tuple(u, v);
                el[i++] = c;
            }
        }
        // close u's row in both CSR outputs
        ordered_xadj[oxi++] = oi;
        xel[xi++] = i;
    }
}
// Log the maximum value of P[0..nEdge) -- the maximum truss number --
// using an OpenMP max-reduction.  T can be any random-access container
// of int-comparable values.
template<typename T>
void printMaxTruss(int nEdge, T P) {
    int max_truss = 0;
#pragma omp parallel for reduction(max:max_truss)
    for (auto i = 0; i < nEdge; i++) {
        max_truss = max(max_truss, P[i]);
    }
    log_info("Max Truss#: %d", max_truss);
}
// First offset in [offset_beg, offset_end) whose element is >= val;
// returns offset_end when no such element exists.
template<typename T>
uint32_t LinearSearch(T *array, uint32_t offset_beg, uint32_t offset_end, int val) {
    // linear search fallback
    uint32_t off = offset_beg;
    while (off < offset_end && array[off] < val)
        ++off;
    return off;
}
// Lower-bound search in sorted array[offset_beg, offset_end): returns the
// first offset with array[offset] >= val (or an exact-match offset), else
// offset_end.  Binary search while the range is large, with prefetches of
// the two possible next midpoints; switches to linear scan below 32 elements.
template<typename T>
uint32_t BinarySearchForGallopingSearch(const T *array, uint32_t offset_beg, uint32_t offset_end, int val) {
    while (offset_end - offset_beg >= 32) {
        // overflow-safe midpoint via 64-bit intermediate
        auto mid = static_cast<uint32_t>((static_cast<unsigned long>(offset_beg) + offset_end) / 2);
        // prefetch both candidate next midpoints before the comparison resolves
        _mm_prefetch((char *) &array[(static_cast<unsigned long>(mid + 1) + offset_end) / 2], _MM_HINT_T0);
        _mm_prefetch((char *) &array[(static_cast<unsigned long>(offset_beg) + mid) / 2], _MM_HINT_T0);
        if (array[mid] == val) {
            return mid;
        } else if (array[mid] < val) {
            offset_beg = mid + 1;
        } else {
            offset_end = mid;
        }
    }
    // linear search fallback
    for (auto offset = offset_beg; offset < offset_end; offset++) {
        if (array[offset] >= val) {
            return offset;
        }
    }
    return offset_end;
}
// Assuming (offset_beg != offset_end)
// Galloping (exponential) lower-bound search: returns the first offset in
// [offset_beg, offset_end) with array[offset] >= val, or offset_end.
// Probes offsets beg, beg+1, beg+2, then doubles the jump distance until the
// value is bracketed, finishing with a binary search on the bracketed range.
// The early array[offset_end - 1] < val check guarantees the +1/+2 probes
// stay in bounds.
template<typename T>
uint32_t GallopingSearch(T *array, uint32_t offset_beg, uint32_t offset_end, int val) {
    if (array[offset_end - 1] < val) {
        return offset_end;
    }
    // galloping
    if (array[offset_beg] >= val) {
        return offset_beg;
    }
    if (array[offset_beg + 1] >= val) {
        return offset_beg + 1;
    }
    if (array[offset_beg + 2] >= val) {
        return offset_beg + 2;
    }
    auto jump_idx = 4u;
    while (true) {
        auto peek_idx = offset_beg + jump_idx;
        if (peek_idx >= offset_end) {
            // overshot the range: binary search the last un-probed half
            return BinarySearchForGallopingSearch(array, (jump_idx >> 1) + offset_beg + 1, offset_end, val);
        }
        if (array[peek_idx] < val) {
            jump_idx <<= 1;
        } else {
            // bracketed: either an exact hit or binary search the half range
            return array[peek_idx] == val ? peek_idx :
                   BinarySearchForGallopingSearch(array, (jump_idx >> 1) + offset_beg + 1, peek_idx + 1, val);
        }
    }
}
// Compute and log a histogram of the n values in `core` (e.g. core numbers):
// the maximum value, optionally the raw histogram (truncated to head/tail
// when large), an md5 digest of the histogram for reproducibility checks,
// and roughly-decile bin summaries in forward and reverse order.
// T must support operator[] yielding int-comparable values.
template<typename T>
void core_val_histogram(int n, T &core, bool is_print = false) {
    // core-value histogram
    int max_core_val = 0;
    vector<int32_t> histogram;
#pragma omp parallel
    {
        // pass 1: parallel max-reduction to size the histogram
#pragma omp for reduction(max:max_core_val)
        for (auto u = 0; u < n; u++) {
            max_core_val = max(max_core_val, core[u]);
        }
        // one thread allocates the bins (implicit barrier after single)
#pragma omp single
        {
            log_info("max value: %d", max_core_val);
            histogram = vector<int32_t>(max_core_val + 1, 0);
        }
        // pass 2: count occurrences; bins are shared, so atomic increments
#pragma omp for
        for (auto u = 0; u < n; u++) {
            auto core_val = core[u];
#pragma omp atomic
            histogram[core_val]++;
        }
    }
    if (is_print) {
        if (histogram.size() < 400) {
            stringstream ss;
            ss << pretty_print_array(&histogram.front(), histogram.size());
            log_info("values histogram: %s", ss.str().c_str());
        } else {
            // large histogram: print only the first and last 100 bins
            {
                stringstream ss;
                ss << pretty_print_array(&histogram.front(), 100);
                log_info("first100 values histogram: %s", ss.str().c_str());
            }
            {
                stringstream ss;
                ss << pretty_print_array(&histogram.front() + histogram.size() - 100, 100);
                log_info("last100 values histogram: %s", ss.str().c_str());
            }
        }
    }
    // digest of the full histogram: cheap equality check across runs
    {
        stringstream ss;
        ss << histogram << "\n";
        log_info("Md5sum of histogram: %s", md5(ss.str()).c_str());
    }
    // forward sweep: group consecutive non-empty bins until each group
    // holds > n/10 values, then log the group's range and count
    auto &bins = histogram;
    auto bin_cnt = 0;
    int64_t acc = 0;
    auto thresh = n / 10;
    auto last = 0;
    for (auto i = 0; i < histogram.size(); i++) {
        if (bins[i] > 0) {
            bin_cnt++;
            acc += bins[i];
            if (acc > thresh || i == histogram.size() - 1) {
                log_info("bin[%d - %d]: %s", last, i, FormatWithCommas(acc).c_str());
                last = i + 1;
                acc = 0;
            }
        }
    }
    // same grouping, sweeping from the largest value downwards
    log_info("Reversed Bins...");
    last = histogram.size() - 1;
    acc = 0;
    for (int32_t i = histogram.size() - 1; i > -1; i--) {
        if (bins[i] > 0) {
            bin_cnt++;
            acc += bins[i];
            if (acc > thresh || i == 0) {
                log_info("bin[%d - %d]: %s", i, last, FormatWithCommas(acc).c_str());
                last = i + 1;
                acc = 0;
            }
        }
    }
    // note: bin_cnt accumulates over BOTH sweeps (each non-empty bin twice)
    log_info("total bin counts: %d", bin_cnt);
}
// Count the elements common to two sorted vectors (two-pointer merge walk).
inline vertex commons(vector<vertex> &a, vector<vertex> &b) {
    vertex ia = 0, ib = 0;
    vertex cnt = 0;
    while (ia < a.size() && ib < b.size()) {
        if (a[ia] == b[ib]) {
            ++cnt;
            ++ia;
            ++ib;
        } else if (a[ia] < b[ib]) {
            ++ia;
        } else {
            ++ib;
        }
    }
    return cnt;
}
// Remove duplicates from `vertices` and leave it sorted ascending.
// Always returns true (kept for interface compatibility with callers).
// Fix: the original deduplicated with a hash map plus vector::erase inside
// the loop -- O(n^2) worst case -- and then sorted anyway.  Since the final
// order is fully determined by the sort, sort + std::unique produces the
// identical result in O(n log n).
inline bool hashUniquify(vector<vertex> &vertices) {
    sort(vertices.begin(), vertices.end());
    vertices.erase(unique(vertices.begin(), vertices.end()), vertices.end());
    return true;
}
void baseLocal12(vertex nVtx, vertex *adj, edge *xadj, vertex *P, const char *vfile);
void nmLocal12(vertex nVtx, vertex *adj, edge *xadj, vertex *P, const char *vfile);
void topKs(vertex nVtx, vertex *adj, edge *xadj, vertex *P, const char *vfile);
void kcore(vertex nVtx, vertex *adj, edge *xadj, vertex *K, const char *vfile);
void converge12onEgo(vertex nVtx, vertex *adj, edge *xadj, vertex *K, string kfile);
void baseLocal23(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *T, const char *vfile);
void nmLocal23(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *T);
void ktruss(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *T, const char *vfile);
void converge23onEgo(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *K, string kfile);
void baseLocal34(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *T, const char *vfile);
void nmLocal34(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *T, const char *vfile);
void k34(vertex nVtx, edge nEdge, vertex *adj, edge *xadj, vertex *T, const char *vfile);
template<typename VtxType, typename EdgeType>
void readGraph(char *filename, VtxType *nVtx, EdgeType *nEdge, VtxType **adj, EdgeType **xadj);
|
essai.c | void functionA() {
}
void functionB() {
}
int main() {
    /* Run the two independent functions concurrently, one per section.
       The combined parallel-sections directive is equivalent to a
       parallel region containing a single sections construct. */
#pragma omp parallel sections
    {
#pragma omp section
        functionA();
#pragma omp section
        functionB();
    }
}
|
displacement_lagrangemultiplier_mixed_frictional_contact_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_FRICTIONAL_CONTACT_CRITERIA_H)
#define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_FRICTIONAL_CONTACT_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "utilities/table_stream_utility.h"
#include "utilities/color_utilities.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
#include "custom_utilities/active_set_utilities.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@name Kratos Classes
///@{
/**
* @class DisplacementLagrangeMultiplierMixedFrictionalContactCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Convergence criteria for contact problems
* @details This class implements a convergence control based on nodal displacement and
* lagrange multiplier values. The error is evaluated separately for each of them, and
* relative and absolute tolerances for both must be specified.
* @author Vicente Mataix Ferrandiz
*/
template< class TSparseSpace,
class TDenseSpace >
class DisplacementLagrangeMultiplierMixedFrictionalContactCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of DisplacementLagrangeMultiplierMixedFrictionalContactCriteria
KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierMixedFrictionalContactCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT );
KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT );
KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The r_table stream definition TODO: Replace by logger
typedef TableStreamUtility::Pointer TablePrinterPointerType;
/// The index type definition
typedef std::size_t IndexType;
/// The key type definition
typedef std::size_t KeyType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor.
* @param DispRatioTolerance Relative tolerance for displacement residual error
* @param DispAbsTolerance Absolute tolerance for displacement residual error
* @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error
* @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error
* @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged
* @param EnsureContact To check if the contact is lost
* @param pTable The pointer to the output r_table
* @param PrintingOutput If the output is going to be printed in a txt file
*/
    // Tolerance-based constructor: stores the convergence tolerances for the
    // displacement residual and for the normal/tangent Lagrange-multiplier
    // corrections, and initializes the behaviour flags.
    explicit DisplacementLagrangeMultiplierMixedFrictionalContactCriteria(
        const TDataType DispRatioTolerance,
        const TDataType DispAbsTolerance,
        const TDataType LMNormalRatioTolerance,
        const TDataType LMNormalAbsTolerance,
        const TDataType LMTangentRatioTolerance,
        const TDataType LMTangentAbsTolerance,
        const TDataType NormalTangentRatio,
        const bool EnsureContact = false,
        const bool PureSlip = false,
        const bool PrintingOutput = false
        )
        : BaseType()
    {
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact);
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput);
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP, PureSlip);
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
        // The displacement residual
        mDispRatioTolerance = DispRatioTolerance;
        mDispAbsTolerance = DispAbsTolerance;
        // The normal contact residual
        mLMNormalRatioTolerance = LMNormalRatioTolerance;
        mLMNormalAbsTolerance = LMNormalAbsTolerance;
        // The tangent contact residual
        mLMTangentRatioTolerance = LMTangentRatioTolerance;
        mLMTangentAbsTolerance = LMTangentAbsTolerance;
        // We get the ratio between the normal and tangent that will accepted as converged
        mNormalTangentRatio = NormalTangentRatio;
    }
/**
* @brief Default constructor (parameters)
* @param ThisParameters The configuration parameters
*/
    // Parameters-based constructor: validates ThisParameters against the
    // defaults below, then reads the tolerances and behaviour flags from it.
    // NOTE(review): the "contact_displacement_*" keys feed the Lagrange
    // multiplier tolerances -- naming kept for input-file compatibility.
    explicit DisplacementLagrangeMultiplierMixedFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})"))
        : BaseType()
    {
        // The default parameters
        Parameters default_parameters = Parameters(R"(
        {
            "ensure_contact"                                     : false,
            "pure_slip"                                          : false,
            "print_convergence_criterion"                        : false,
            "residual_relative_tolerance"                        : 1.0e-4,
            "residual_absolute_tolerance"                        : 1.0e-9,
            "contact_displacement_relative_tolerance"            : 1.0e-4,
            "contact_displacement_absolute_tolerance"            : 1.0e-9,
            "frictional_contact_displacement_relative_tolerance" : 1.0e-4,
            "frictional_contact_displacement_absolute_tolerance" : 1.0e-9,
            "ratio_normal_tangent_threshold"                     : 1.0e-4
        })" );
        ThisParameters.ValidateAndAssignDefaults(default_parameters);
        // The displacement residual
        mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
        mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble();
        // The normal contact solution
        mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble();
        mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble();
        // The tangent contact solution
        mLMTangentRatioTolerance = ThisParameters["frictional_contact_displacement_relative_tolerance"].GetDouble();
        mLMTangentAbsTolerance = ThisParameters["frictional_contact_displacement_absolute_tolerance"].GetDouble();
        // We get the ratio between the normal and tangent that will accepted as converged
        mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble();
        // Set local flags
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::TABLE_IS_INITIALIZED, false);
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool());
        mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
    }
//* Copy constructor.
DisplacementLagrangeMultiplierMixedFrictionalContactCriteria( DisplacementLagrangeMultiplierMixedFrictionalContactCriteria const& rOther )
:BaseType(rOther)
,mOptions(rOther.mOptions)
,mDispRatioTolerance(rOther.mDispRatioTolerance)
,mDispAbsTolerance(rOther.mDispAbsTolerance)
,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm)
,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm)
,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance)
,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance)
,mLMTangentRatioTolerance(rOther.mLMNormalRatioTolerance)
,mLMTangentAbsTolerance(rOther.mLMNormalAbsTolerance)
,mNormalTangentRatio(rOther.mNormalTangentRatio)
{
}
/// Destructor.
~DisplacementLagrangeMultiplierMixedFrictionalContactCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
// Getting process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Compute the active set
if (!r_process_info[ACTIVE_SET_COMPUTED]) {
const array_1d<std::size_t, 2> is_converged = ActiveSetUtilities::ComputeALMFrictionalActiveSet(rModelPart, mOptions.Is(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP), this->GetEchoLevel());
// We save to the process info if the active set has converged
r_process_info[ACTIVE_SET_CONVERGED] = is_converged[0] == 0 ? true : false;
r_process_info[SLIP_SET_CONVERGED] = is_converged[1] == 0 ? true : false;
r_process_info[ACTIVE_SET_COMPUTED] = true;
}
// Initialize
TDataType disp_residual_solution_norm = 0.0, normal_lm_solution_norm = 0.0, normal_lm_increase_norm = 0.0, tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0, tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0;
IndexType disp_dof_num(0),lm_dof_num(0),lm_stick_dof_num(0),lm_slip_dof_num(0);
// The nodes array
auto& r_nodes_array = rModelPart.Nodes();
// First iterator
const auto it_dof_begin = rDofSet.begin();
// Auxiliar values
std::size_t dof_id = 0;
TDataType residual_dof_value = 0.0, dof_value = 0.0, dof_incr = 0.0;
// Loop over Dofs
#pragma omp parallel for reduction(+:disp_residual_solution_norm,normal_lm_solution_norm,normal_lm_increase_norm,disp_dof_num,lm_dof_num, lm_stick_dof_num, lm_slip_dof_num, dof_id,residual_dof_value,dof_value,dof_incr)
for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
auto it_dof = it_dof_begin + i;
if (it_dof->IsFree()) {
dof_id = it_dof->EquationId();
const auto curr_var = it_dof->GetVariable();
if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double normal_x = it_node->FastGetSolutionStepValue(NORMAL_X);
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const TDataType normal_dof_value = dof_value * normal_x;
const TDataType normal_dof_incr = dof_incr * normal_x;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
lm_dof_num++;
} else if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double normal_y = it_node->FastGetSolutionStepValue(NORMAL_Y);
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const TDataType normal_dof_value = dof_value * normal_y;
const TDataType normal_dof_incr = dof_incr * normal_y;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
lm_dof_num++;
} else if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) {
// The normal of the node (TODO: how to solve this without accesing all the time to the database?)
const auto it_node = r_nodes_array.find(it_dof->Id());
const double normal_z = it_node->FastGetSolutionStepValue(NORMAL_Z);
dof_value = it_dof->GetSolutionStepValue(0);
dof_incr = rDx[dof_id];
const TDataType normal_dof_value = dof_value * normal_z;
const TDataType normal_dof_incr = dof_incr * normal_z;
normal_lm_solution_norm += std::pow(normal_dof_value, 2);
normal_lm_increase_norm += std::pow(normal_dof_incr, 2);
if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) {
tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_slip_dof_num;
} else {
tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2);
tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2);
++lm_stick_dof_num;
}
lm_dof_num++;
} else {
residual_dof_value = rb[dof_id];
disp_residual_solution_norm += residual_dof_value * residual_dof_value;
disp_dof_num++;
}
}
}
if(normal_lm_increase_norm == 0.0) normal_lm_increase_norm = 1.0;
if(tangent_lm_stick_increase_norm == 0.0) tangent_lm_stick_increase_norm = 1.0;
if(tangent_lm_slip_increase_norm == 0.0) tangent_lm_slip_increase_norm = 1.0;
KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;
mDispCurrentResidualNorm = disp_residual_solution_norm;
const TDataType normal_lm_ratio = std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm);
const TDataType tangent_lm_slip_ratio = std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm);
const TDataType tangent_lm_stick_ratio = std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm);
const TDataType normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<TDataType>(lm_dof_num);
const TDataType tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<TDataType>(lm_stick_dof_num) : 0.0;
const TDataType tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<TDataType>(lm_slip_dof_num) : 0.0;
const TDataType normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs;
const TDataType normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs;
TDataType residual_disp_ratio;
// We initialize the solution
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
residual_disp_ratio = 1.0;
mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
}
// We calculate the ratio of the displacements
residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;
// We calculate the absolute norms
TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
// We print the results // TODO: Replace for the new log
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
std::cout.precision(4);
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) {
r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentRatioTolerance << tangent_lm_stick_abs << mLMTangentAbsTolerance << tangent_lm_slip_ratio << mLMTangentRatioTolerance << tangent_lm_slip_abs << mLMTangentAbsTolerance;
} else {
r_table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_slip_ratio << mLMTangentRatioTolerance << tangent_lm_slip_abs << mLMTangentAbsTolerance;
}
} else {
std::cout.precision(4);
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT)) {
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << BOLDFONT("MIXED CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << BOLDFONT("\tNORMAL LAGRANGE MUL: RATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO_IF("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria", mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentAbsTolerance << std::endl;
} else {
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << "MIXED CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << "\tNORMAL LAGRANGE MUL: RATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << std::endl;
KRATOS_INFO_IF("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria", mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentAbsTolerance << std::endl;
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentAbsTolerance << std::endl;
}
}
}
// NOTE: Here we don't include the tangent counter part
r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > normal_lm_ratio) ? residual_disp_ratio : normal_lm_ratio;
r_process_info[RESIDUAL_NORM] = (normal_lm_abs > mLMNormalAbsTolerance) ? normal_lm_abs : mLMNormalAbsTolerance;
// We check if converged
const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm == 0.0) ? true : (normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) && (tangent_lm_stick_ratio <= mLMTangentRatioTolerance || tangent_lm_stick_abs <= mLMTangentAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (tangent_lm_slip_ratio <= mLMTangentRatioTolerance || tangent_lm_slip_abs <= mLMTangentAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio);
if ( disp_converged && lm_converged ) {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FGRN(" Achieved"));
else
r_table << "Achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FGRN("achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << "\tConvergence is achieved" << std::endl;
}
}
return true;
} else {
if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
if (r_process_info.Has(TABLE_UTILITY)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT))
r_table << BOLDFONT(FRED(" Not achieved"));
else
r_table << "Not achieved";
} else {
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PRINTING_OUTPUT))
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << BOLDFONT("\tConvergence") << " is " << BOLDFONT(FRED(" not achieved")) << std::endl;
else
KRATOS_INFO("DisplacementLagrangeMultiplierMixedFrictionalContactCriteria") << "\tConvergence is not achieved" << std::endl;
}
}
return false;
}
} else // In this case all the displacements are imposed!
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize( ModelPart& rModelPart) override
{
// Mark the base criteria as initialized before any optional table setup
BaseType::mConvergenceCriteriaIsInitialized = true;
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Build the convergence-report table exactly once, and only when a
// TABLE_UTILITY has been registered in the ProcessInfo.
// NOTE: column order must match the streaming order used when reporting
// (displacement, normal LM, [stick LM], slip LM, convergence status).
if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::TABLE_IS_INITIALIZED)) {
TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
auto& r_table = p_table->GetTable();
// Displacement residual: ratio/absolute values with their tolerances
r_table.AddColumn("DP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
// Normal Lagrange multiplier: ratio/absolute values with their tolerances
r_table.AddColumn("N.LM RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
// Stick (tangent) LM columns are only relevant when stick states exist,
// i.e. when the problem is not pure slip
if (mOptions.IsNot(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::PURE_SLIP)) {
r_table.AddColumn("STI. RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
}
// Slip (tangent) Lagrange multiplier columns
r_table.AddColumn("SLIP RATIO", 10);
r_table.AddColumn("EXP. RAT", 10);
r_table.AddColumn("ABS", 10);
r_table.AddColumn("EXP. ABS", 10);
r_table.AddColumn("CONVERGENCE", 15);
// Remember that the table was built so re-initialization does not add
// duplicate columns
mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::TABLE_IS_INITIALIZED, true);
}
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Reset the flag so the first non-linear iteration of this step records a
// fresh reference displacement-residual norm (used for the ratio check)
mOptions.Set(DisplacementLagrangeMultiplierMixedFrictionalContactCriteria::INITIAL_RESIDUAL_IS_SET, false);
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Calling base criteria
BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// Invalidate the active set so it is recomputed on the next iteration
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
Flags mOptions; /// Local flags
TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual
TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal)
TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal)
TDataType mLMTangentRatioTolerance; /// The ratio threshold for the norm of the LM (tangent)
TDataType mLMTangentAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent)
TDataType mNormalTangentRatio; /// The ratio to accept a non converged tangent component in case
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Kratos DisplacementLagrangeMultiplierMixedFrictionalContactCriteria
///@name Local flags creation
///@{
/// Local Flags
// Out-of-class definitions of the criteria's local flags. Each FLAG/NOT_FLAG
// pair shares one bit position; the NOT_ variant is created with the same bit
// index and a false value (Kratos Flags::Create(bit, value) convention).
// Bit 0: contact must be guaranteed (normal LM norm may not vanish)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
// Bit 1: plain-text output (no ANSI color/bold escape sequences)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
// Bit 2: the report table columns have already been registered
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
// Bit 3: frictional problem is pure slip (no stick LM contributions)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PURE_SLIP(Kratos::Flags::Create(3, false));
// Bit 4: reference displacement-residual norm captured for this step
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierMixedFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(4, false));
}
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_MIXED_FRICTIONAL_CONTACT_CRITERIA_H */
|
8d-example-simd.c | #include <math.h>
#include <stdio.h>
#include "immintrin.h"
#include <assert.h>
#include <omp.h>
#include "kmeans-simd.h"
#include "dataset_test.h"
/* Debug print helpers: dump the lanes of a vector variable X through the
 * caller's local `float *d`. DEBUGD prints 4 lanes, DEBUGS 2, DEBUGF 8.
 * Fixes: (1) `d = &X` assigned a vector address to float* without a cast
 * (incompatible pointer types); (2) the disabled branch was missing an empty
 * DEBUGF, so turning debugging off would break any DEBUGF call site. */
#if 1
#define DEBUGD(X)\
d = (float *)&(X);\
printf("Printing %s\n", #X); \
for(int _i=0; _i<4; _i++)\
printf("X[%d] = %f ", _i, d[_i]);\
printf("\n\n");
#define DEBUGS(X)\
d = (float *)&(X);\
printf("Printing %s\n", #X); \
for(int _i=0; _i<2; _i++)\
printf("X[%d] = %f ", _i, d[_i]);\
printf("\n\n");
#define DEBUGF(X)\
d = (float *)&(X);\
printf("Printing %s\n", #X); \
for(int _i=0; _i<8; _i++)\
printf("X[%d] = %f ", _i, d[_i]);\
printf("\n\n");
#else
#define DEBUGD(X)
#define DEBUGS(X)
#define DEBUGF(X)
#endif
/* Element count of an actual array (not valid on pointers). */
#define ARRAY_LEN(X) (sizeof(X)/sizeof((X)[0]))
#define DEBUG
#ifdef DEBUG
#define dbg_printf printf
#else
#define dbg_printf(...)
#endif
//timing routine for reading the time stamp counter
/* Read the x86 time stamp counter: RDTSC returns the cycle count split across
 * EDX:EAX; reassemble into a 64-bit value.
 * NOTE(review): RDTSC is not a serializing instruction, so measurements may be
 * skewed by out-of-order execution — acceptable for coarse timing here. */
static __inline__ unsigned long long rdtsc(void) {
unsigned hi, lo;
__asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
}
/* Unreadable transpose function. Works though :) */
/* Transpose one 8x8 tile of 32-bit elements with AVX2.
 * dest:       base of the destination tile; row r is written at
 *             dest + r*dst_offset.
 * src:        base of the source tile; row r is read from
 *             src + r*src_offset.
 * src_offset, dst_offset: row strides in floats (unaligned access is fine).
 * Fixes vs. original: _mm256_loadu_si256/_mm256_storeu_si256 take __m256i
 * pointers, but float* was passed directly (incompatible pointer types);
 * unused local `float *d` removed; parameter typo dst_offest corrected. */
static void transpose_8_kernel(float *dest, float *src, int src_offset, int dst_offset)
{
    register __m256i r0, r1, r2, r3, r4, r5, r6, r7;
    register __m256i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    float *dest0, *dest1, *dest2, *dest3, *dest4, *dest5, *dest6, *dest7;

    dest0 = dest;
    dest1 = dest + 1*dst_offset;
    dest2 = dest + 2*dst_offset;
    dest3 = dest + 3*dst_offset;
    dest4 = dest + 4*dst_offset;
    dest5 = dest + 5*dst_offset;
    dest6 = dest + 6*dst_offset;
    dest7 = dest + 7*dst_offset;

    /* Load the 8 source rows (treated as raw 32-bit lanes). */
    r0 = _mm256_loadu_si256((const __m256i *)src);
    r1 = _mm256_loadu_si256((const __m256i *)(src + 1*src_offset));
    r2 = _mm256_loadu_si256((const __m256i *)(src + 2*src_offset));
    r3 = _mm256_loadu_si256((const __m256i *)(src + 3*src_offset));
    r4 = _mm256_loadu_si256((const __m256i *)(src + 4*src_offset));
    r5 = _mm256_loadu_si256((const __m256i *)(src + 5*src_offset));
    r6 = _mm256_loadu_si256((const __m256i *)(src + 6*src_offset));
    r7 = _mm256_loadu_si256((const __m256i *)(src + 7*src_offset));

    /* Stage 1: interleave 32-bit lanes of adjacent rows. */
    tmp0 = _mm256_unpacklo_epi32(r0, r1);
    tmp1 = _mm256_unpackhi_epi32(r0, r1);
    tmp2 = _mm256_unpacklo_epi32(r2, r3);
    tmp3 = _mm256_unpackhi_epi32(r2, r3);
    tmp4 = _mm256_unpacklo_epi32(r4, r5);
    tmp5 = _mm256_unpackhi_epi32(r4, r5);
    tmp6 = _mm256_unpacklo_epi32(r6, r7);
    tmp7 = _mm256_unpackhi_epi32(r6, r7);

    /* Stage 2: interleave 64-bit pairs. */
    r0 = _mm256_unpacklo_epi64(tmp0, tmp2);
    r1 = _mm256_unpackhi_epi64(tmp0, tmp2);
    r2 = _mm256_unpacklo_epi64(tmp1, tmp3);
    r3 = _mm256_unpackhi_epi64(tmp1, tmp3);
    r4 = _mm256_unpacklo_epi64(tmp4, tmp6);
    r5 = _mm256_unpackhi_epi64(tmp4, tmp6);
    r6 = _mm256_unpacklo_epi64(tmp5, tmp7);
    r7 = _mm256_unpackhi_epi64(tmp5, tmp7);

    /* Stage 3: swap 128-bit halves to complete the transpose. */
    tmp0 = _mm256_permute2f128_si256(r0, r4, 0x20);
    tmp1 = _mm256_permute2f128_si256(r1, r5, 0x20);
    tmp2 = _mm256_permute2f128_si256(r2, r6, 0x20);
    tmp3 = _mm256_permute2f128_si256(r3, r7, 0x20);
    tmp4 = _mm256_permute2f128_si256(r0, r4, 0x31);
    tmp5 = _mm256_permute2f128_si256(r1, r5, 0x31);
    tmp6 = _mm256_permute2f128_si256(r2, r6, 0x31);
    tmp7 = _mm256_permute2f128_si256(r3, r7, 0x31);

    _mm256_storeu_si256((__m256i *)dest0, tmp0);
    _mm256_storeu_si256((__m256i *)dest1, tmp1);
    _mm256_storeu_si256((__m256i *)dest2, tmp2);
    _mm256_storeu_si256((__m256i *)dest3, tmp3);
    _mm256_storeu_si256((__m256i *)dest4, tmp4);
    _mm256_storeu_si256((__m256i *)dest5, tmp5);
    _mm256_storeu_si256((__m256i *)dest6, tmp6);
    _mm256_storeu_si256((__m256i *)dest7, tmp7);
}
/* Transpose the dataset tile-by-tile using the 8x8 AVX2 kernel.
 * num_objs/DIM tiles are processed; tile t reads from
 * objs + t*src_increment and writes to transpose + t*dst_increment, with
 * src_offset/dst_offset as the per-row strides inside each tile. */
static void d_transpose(float *transpose, float *objs, int num_objs,
                        int src_offset, int src_increment,
                        int dst_offset, int dst_increment)
{
    const int num_tiles = num_objs / DIM;
    for (int tile = 0; tile < num_tiles; tile++) {
        float *src_tile = objs + tile * src_increment;
        float *dst_tile = transpose + tile * dst_increment;
        transpose_8_kernel(dst_tile, src_tile, src_offset, dst_offset);
    }
}
/* Compute distance of 56 points(in 7 vectors) */
/* Compute squared Euclidean distances of 56 points (7 AVX vectors of 8
 * floats) to a single centroid.
 * dest:       56 output values (squared distances; sqrt is unnecessary for
 *             nearest-centroid comparison).
 * src:        transposed points, dimension-major: lane layout is
 *             src[dim*src_offset + point].
 * c_src:      the centroid's DIM coordinates.
 * src_offset: stride in floats between consecutive dimensions of src.
 * Fix vs. original: unused local `float *d` removed. */
static void distance_7_kernel(float *dest, float *src, float *c_src, int src_offset)
{
    register __m256 c;
    register __m256 r0, r1, r2, r3, r4, r5, r6;
    register __m256 acc0, acc1, acc2, acc3, acc4, acc5, acc6;

    /* Init accumulators */
    acc0 = _mm256_setzero_ps();
    acc1 = _mm256_setzero_ps();
    acc2 = _mm256_setzero_ps();
    acc3 = _mm256_setzero_ps();
    acc4 = _mm256_setzero_ps();
    acc5 = _mm256_setzero_ps();
    acc6 = _mm256_setzero_ps();

    for (int i = 0; i < DIM; i++) {
        /* Broadcast the centroid's i-th coordinate to all lanes. */
        c = _mm256_broadcast_ss(c_src + i);
        /* Load the i-th coordinate of 56 transposed points. */
        r0 = _mm256_loadu_ps(src);
        r1 = _mm256_loadu_ps(src+8);
        r2 = _mm256_loadu_ps(src+16);
        r3 = _mm256_loadu_ps(src+24);
        r4 = _mm256_loadu_ps(src+32);
        r5 = _mm256_loadu_ps(src+40);
        r6 = _mm256_loadu_ps(src+48);
        /* Per-dimension difference. */
        r0 = _mm256_sub_ps(r0, c);
        r1 = _mm256_sub_ps(r1, c);
        r2 = _mm256_sub_ps(r2, c);
        r3 = _mm256_sub_ps(r3, c);
        r4 = _mm256_sub_ps(r4, c);
        r5 = _mm256_sub_ps(r5, c);
        r6 = _mm256_sub_ps(r6, c);
        /* Accumulate diff^2 into the running squared distance. */
        acc0 = _mm256_fmadd_ps(r0, r0, acc0);
        acc1 = _mm256_fmadd_ps(r1, r1, acc1);
        acc2 = _mm256_fmadd_ps(r2, r2, acc2);
        acc3 = _mm256_fmadd_ps(r3, r3, acc3);
        acc4 = _mm256_fmadd_ps(r4, r4, acc4);
        acc5 = _mm256_fmadd_ps(r5, r5, acc5);
        acc6 = _mm256_fmadd_ps(r6, r6, acc6);
        /* Advance to the next dimension of the transposed layout. */
        src += src_offset;
    }

    /* Store the 56 computed distances for this centroid. */
    _mm256_storeu_ps(dest, acc0);
    _mm256_storeu_ps(dest+8, acc1);
    _mm256_storeu_ps(dest+16, acc2);
    _mm256_storeu_ps(dest+24, acc3);
    _mm256_storeu_ps(dest+32, acc4);
    _mm256_storeu_ps(dest+40, acc5);
    _mm256_storeu_ps(dest+48, acc6);
}
/* Compute the distance of every point to every centroid, in parallel over
 * centroids; results land in config->distance_arr (k rows of num_objs).
 * The float return type is kept to match the distance_method callback
 * signature; the value itself is unused.
 * Fixes vs. original: the function fell off the end of a non-void function
 * (undefined behavior if the caller uses the result) — an explicit return
 * was added; unused local `int i` removed. */
static float d_distance(kmeans_config *config)
{
#pragma omp parallel for
    for (int k = 0; k < config->k; k++) {
        for (int j = 0; j < config->num_objs/(DIM*DISTANCE_KERNEL_NUM_POINTS); j++) {
            int src_offset = config->num_objs;
            float *src = config->transpose_arr + j*DIM*DISTANCE_KERNEL_NUM_POINTS;
            float *dest = config->distance_arr + k*config->num_objs + j*DIM*DISTANCE_KERNEL_NUM_POINTS;
            distance_7_kernel(dest, src, config->centers + k*DIM, src_offset);
        }
    }
    return 0.0f;
}
/* Kernel to find the closest cluster (min operation)
* It then outputs a mask vector, with the cluster it
* belongs to having the value 1.0, and remaining as 0 */
/* For 8 points, find the minimum of their 8 per-cluster distances and emit a
 * one-hot mask: dest holds, per point, 8 floats with 1.0 in the lane of the
 * nearest cluster and 0.0 elsewhere.
 * d_src:    distances laid out cluster-major; cluster c's distances for these
 *           8 points start at d_src + c*d_offset.
 * NOTE(review): the direct (__m256i)/(__m256) value casts below are a
 * GCC/Clang vector extension; strictly portable code would use
 * _mm256_castps_si256/_mm256_castsi256_ps.
 * NOTE(review): ties (equal minimum distance in two clusters) set 1.0 in
 * more than one lane — presumably ruled out upstream; confirm with caller. */
static void compare_8_kernel(float *dest, float *d_src, int d_offset)
{
register __m256 r0, r1, r2, r3, r4, r5, r6, r7;
register __m256 t0, t1, t2, t3, t4, t5, t6, t7;
register __m256 min;
/* Load distances of 8 points from 8 clusters */
r0 = _mm256_loadu_ps(d_src);
r1 = _mm256_loadu_ps(d_src+1*d_offset);
r2 = _mm256_loadu_ps(d_src+2*d_offset);
r3 = _mm256_loadu_ps(d_src+3*d_offset);
r4 = _mm256_loadu_ps(d_src+4*d_offset);
r5 = _mm256_loadu_ps(d_src+5*d_offset);
r6 = _mm256_loadu_ps(d_src+6*d_offset);
r7 = _mm256_loadu_ps(d_src+7*d_offset);
/* Find min: lane p of `min` ends up holding point p's smallest distance */
t0 = _mm256_min_ps(r0, r1);
t2 = _mm256_min_ps(r2, r3);
t4 = _mm256_min_ps(r4, r5);
t6 = _mm256_min_ps(r6, r7);
t0 = _mm256_min_ps(t0, t2);
t4 = _mm256_min_ps(t4, t6);
min = _mm256_min_ps(t0, t4);
/* Perform transpose of distances: after this, tN holds the 8 cluster
 * distances of point N (same unpack/permute network as transpose_8_kernel) */
t0 = (__m256)_mm256_unpacklo_epi32((__m256i)r0, (__m256i)r1);
t1 = (__m256)_mm256_unpackhi_epi32((__m256i)r0, (__m256i)r1);
t2 = (__m256)_mm256_unpacklo_epi32((__m256i)r2, (__m256i)r3);
t3 = (__m256)_mm256_unpackhi_epi32((__m256i)r2, (__m256i)r3);
t4 = (__m256)_mm256_unpacklo_epi32((__m256i)r4, (__m256i)r5);
t5 = (__m256)_mm256_unpackhi_epi32((__m256i)r4, (__m256i)r5);
t6 = (__m256)_mm256_unpacklo_epi32((__m256i)r6, (__m256i)r7);
t7 = (__m256)_mm256_unpackhi_epi32((__m256i)r6, (__m256i)r7);
r0 = (__m256)_mm256_unpacklo_epi64((__m256i)t0, (__m256i)t2);
r1 = (__m256)_mm256_unpackhi_epi64((__m256i)t0, (__m256i)t2);
r2 = (__m256)_mm256_unpacklo_epi64((__m256i)t1, (__m256i)t3);
r3 = (__m256)_mm256_unpackhi_epi64((__m256i)t1, (__m256i)t3);
r4 = (__m256)_mm256_unpacklo_epi64((__m256i)t4, (__m256i)t6);
r5 = (__m256)_mm256_unpackhi_epi64((__m256i)t4, (__m256i)t6);
r6 = (__m256)_mm256_unpacklo_epi64((__m256i)t5, (__m256i)t7);
r7 = (__m256)_mm256_unpackhi_epi64((__m256i)t5, (__m256i)t7);
t0 = (__m256)_mm256_permute2f128_si256((__m256i)r0, (__m256i)r4, 0x20);
t1 = (__m256)_mm256_permute2f128_si256((__m256i)r1, (__m256i)r5, 0x20);
t2 = (__m256)_mm256_permute2f128_si256((__m256i)r2, (__m256i)r6, 0x20);
t3 = (__m256)_mm256_permute2f128_si256((__m256i)r3, (__m256i)r7, 0x20);
t4 = (__m256)_mm256_permute2f128_si256((__m256i)r0, (__m256i)r4, 0x31);
t5 = (__m256)_mm256_permute2f128_si256((__m256i)r1, (__m256i)r5, 0x31);
t6 = (__m256)_mm256_permute2f128_si256((__m256i)r2, (__m256i)r6, 0x31);
t7 = (__m256)_mm256_permute2f128_si256((__m256i)r3, (__m256i)r7, 0x31);
/* Constant 1.0 used to convert the all-ones compare mask into 1.0f */
r5 = _mm256_set1_ps(1.0);
/* broadcast mins of points 0..4 (registers r0..r4 are free to reuse) */
r0 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(0));
r1 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(1));
r2 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(2));
r3 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(3));
r4 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(4));
/* Compare each point's 8 distances against its own minimum */
r0 = _mm256_cmp_ps(t0, r0, _CMP_EQ_OQ);
r1 = _mm256_cmp_ps(t1, r1, _CMP_EQ_OQ);
r2 = _mm256_cmp_ps(t2, r2, _CMP_EQ_OQ);
r3 = _mm256_cmp_ps(t3, r3, _CMP_EQ_OQ);
r4 = _mm256_cmp_ps(t4, r4, _CMP_EQ_OQ);
/* And with 1.0: turns 0xFFFFFFFF mask lanes into 1.0f, zero lanes stay 0 */
r0 = _mm256_and_ps(r0, r5);
r1 = _mm256_and_ps(r1, r5);
r2 = _mm256_and_ps(r2, r5);
r3 = _mm256_and_ps(r3, r5);
r4 = _mm256_and_ps(r4, r5);
/* Store one-hot masks for points 0..4 */
_mm256_storeu_ps(dest, r0);
_mm256_storeu_ps(dest+1*8, r1);
_mm256_storeu_ps(dest+2*8, r2);
_mm256_storeu_ps(dest+3*8, r3);
_mm256_storeu_ps(dest+4*8, r4);
/* broadcast mins of points 5..7. r0, r1, r2 no longer needed */
r0 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(5));
r1 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(6));
r2 = _mm256_permutevar8x32_ps(min, _mm256_set1_epi32(7));
/* Compare */
r0 = _mm256_cmp_ps(t5, r0, _CMP_EQ_OQ);
r1 = _mm256_cmp_ps(t6, r1, _CMP_EQ_OQ);
r2 = _mm256_cmp_ps(t7, r2, _CMP_EQ_OQ);
/* And */
r0 = _mm256_and_ps(r0, r5);
r1 = _mm256_and_ps(r1, r5);
r2 = _mm256_and_ps(r2, r5);
/* Store one-hot masks for points 5..7 */
_mm256_storeu_ps(dest+5*8, r0);
_mm256_storeu_ps(dest+6*8, r1);
_mm256_storeu_ps(dest+7*8, r2);
}
/* Compute new cluster assignments
* Outputs a mask for assignments
* */
/* Assign every point to its nearest cluster, in parallel over batches.
 * For each batch of CENTROID_KERNEL_NUM_POINTS points, compare_8_kernel
 * writes a one-hot (1.0f/0.0f) membership mask into config->mask_arr. */
static void d_centroid(kmeans_config *config)
{
    const int batches = config->num_objs / CENTROID_KERNEL_NUM_POINTS;
#pragma omp parallel for
    for (int batch = 0; batch < batches; batch++) {
        const int dist_stride = config->num_objs;
        float *mask_out = config->mask_arr + batch * CENTROID_KERNEL_NUM_POINTS * DIM;
        float *dist_in = config->distance_arr + batch * DIM;
        compare_8_kernel(mask_out, dist_in, dist_stride);
    }
}
/* Recompute centroid coordinates from the one-hot membership masks.
 * Pass 1 accumulates, for each of the 8 dimensions, the masked sum over all
 * points (one lane per cluster) plus the per-cluster point counts. The 8
 * accumulators are then transposed so each vector holds one centroid's
 * coordinates, divided by the cluster's count, and stored to config->centers.
 * Fixes vs. original: the per-lane zero tests assigned `&b0` (a __m256*) to a
 * float* without a cast (incompatible pointer types) — replaced with
 * _mm256_cvtss_f32; the non-portable (__m256d) value casts replaced with
 * _mm256_castps_pd/_mm256_castpd_ps; unused locals (t0, t1, d, b5..b7)
 * removed. */
static void d_means(kmeans_config *config)
{
    float *dest;
    register __m256 c0, c1, c2, c3, c4, c5, c6, c7;
    register __m256 macc;

    /* cN accumulates dimension N's masked sum; one lane per cluster. */
    c0 = _mm256_setzero_ps();
    c1 = _mm256_setzero_ps();
    c2 = _mm256_setzero_ps();
    c3 = _mm256_setzero_ps();
    c4 = _mm256_setzero_ps();
    c5 = _mm256_setzero_ps();
    c6 = _mm256_setzero_ps();
    c7 = _mm256_setzero_ps();
    macc = _mm256_setzero_ps(); /* per-cluster point counts */

    /* Each iteration folds one point's 8 dimensions into the accumulators. */
    for (int i = 0; i < config->num_objs; i++) {
        register __m256 r0, r1, r2, r3, r4;
        register __m256 mask;
        float *msrc = config->mask_arr + i*DIM;
        float *src = config->objs + i*DIM;
        /* One-hot membership mask: 1.0 in the point's cluster lane. */
        mask = _mm256_loadu_ps(msrc);
        r0 = _mm256_broadcast_ss(src);
        r1 = _mm256_broadcast_ss(src+1);
        r2 = _mm256_broadcast_ss(src+2);
        r3 = _mm256_broadcast_ss(src+3);
        r4 = _mm256_broadcast_ss(src+4);
        c0 = _mm256_fmadd_ps(r0, mask, c0);
        r0 = _mm256_broadcast_ss(src+5);
        c1 = _mm256_fmadd_ps(r1, mask, c1);
        r1 = _mm256_broadcast_ss(src+6);
        c2 = _mm256_fmadd_ps(r2, mask, c2);
        r2 = _mm256_broadcast_ss(src+7);
        c3 = _mm256_fmadd_ps(r3, mask, c3);
        c4 = _mm256_fmadd_ps(r4, mask, c4);
        c5 = _mm256_fmadd_ps(r0, mask, c5);
        c6 = _mm256_fmadd_ps(r1, mask, c6);
        c7 = _mm256_fmadd_ps(r2, mask, c7);
        macc = _mm256_add_ps(mask, macc);
    }

    /* Transpose c0..c7 so cN/tmpN holds the 8 coordinates of centroid N
     * (currently cN holds dimension N of all 8 centroids). */
    __m256 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m256 b0, b1, b2, b3, b4;
    tmp0 = _mm256_unpacklo_ps(c0, c1);
    tmp1 = _mm256_unpackhi_ps(c0, c1);
    tmp2 = _mm256_unpacklo_ps(c2, c3);
    tmp3 = _mm256_unpackhi_ps(c2, c3);
    tmp4 = _mm256_unpacklo_ps(c4, c5);
    tmp5 = _mm256_unpackhi_ps(c4, c5);
    tmp6 = _mm256_unpacklo_ps(c6, c7);
    tmp7 = _mm256_unpackhi_ps(c6, c7);
    c0 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(tmp0), _mm256_castps_pd(tmp2)));
    c1 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(tmp0), _mm256_castps_pd(tmp2)));
    c2 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(tmp1), _mm256_castps_pd(tmp3)));
    c3 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(tmp1), _mm256_castps_pd(tmp3)));
    c4 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(tmp4), _mm256_castps_pd(tmp6)));
    c5 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(tmp4), _mm256_castps_pd(tmp6)));
    c6 = _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(tmp5), _mm256_castps_pd(tmp7)));
    c7 = _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(tmp5), _mm256_castps_pd(tmp7)));
    tmp0 = _mm256_permute2f128_ps(c0, c4, 0x20);
    tmp1 = _mm256_permute2f128_ps(c1, c5, 0x20);
    tmp2 = _mm256_permute2f128_ps(c2, c6, 0x20);
    tmp3 = _mm256_permute2f128_ps(c3, c7, 0x20);
    tmp4 = _mm256_permute2f128_ps(c0, c4, 0x31);
    tmp5 = _mm256_permute2f128_ps(c1, c5, 0x31);
    tmp6 = _mm256_permute2f128_ps(c2, c6, 0x31);
    tmp7 = _mm256_permute2f128_ps(c3, c7, 0x31);

    /* bN = count of points in cluster N, broadcast to all lanes.
     * Skip the division for empty clusters (count == 0) to avoid 0/0;
     * such a centroid keeps its accumulated (all-zero) coordinates. */
    b0 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(0));
    b1 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(1));
    b2 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(2));
    b3 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(3));
    b4 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(4));
    if (_mm256_cvtss_f32(b0) != 0.0f)
        tmp0 = _mm256_div_ps(tmp0, b0);
    if (_mm256_cvtss_f32(b1) != 0.0f)
        tmp1 = _mm256_div_ps(tmp1, b1);
    if (_mm256_cvtss_f32(b2) != 0.0f)
        tmp2 = _mm256_div_ps(tmp2, b2);
    if (_mm256_cvtss_f32(b3) != 0.0f)
        tmp3 = _mm256_div_ps(tmp3, b3);
    if (_mm256_cvtss_f32(b4) != 0.0f)
        tmp4 = _mm256_div_ps(tmp4, b4);
    b0 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(5));
    b1 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(6));
    b2 = _mm256_permutevar8x32_ps(macc, _mm256_set1_epi32(7));
    if (_mm256_cvtss_f32(b0) != 0.0f)
        tmp5 = _mm256_div_ps(tmp5, b0);
    if (_mm256_cvtss_f32(b1) != 0.0f)
        tmp6 = _mm256_div_ps(tmp6, b1);
    if (_mm256_cvtss_f32(b2) != 0.0f)
        tmp7 = _mm256_div_ps(tmp7, b2);

    /* Write the 8 new centroids (8 coordinates each) back. */
    dest = config->centers;
    _mm256_storeu_ps(dest, tmp0);
    _mm256_storeu_ps(dest+8, tmp1);
    _mm256_storeu_ps(dest+16, tmp2);
    _mm256_storeu_ps(dest+24, tmp3);
    _mm256_storeu_ps(dest+32, tmp4);
    _mm256_storeu_ps(dest+40, tmp5);
    _mm256_storeu_ps(dest+48, tmp6);
    _mm256_storeu_ps(dest+56, tmp7);
}
/* Configure and run the SIMD k-means on the compiled-in dataset, then verify
 * that every point got exactly one cluster assignment and report centroids,
 * cluster sizes and cycle counts.
 * Fixes vs. original: the malloc-failure path used assert(1), which never
 * fires — replaced with real error handling; distance_transpose_arr was
 * freed but never initialized — now NULL-initialized like its siblings;
 * "%ld" was used for unsigned long long cycle counts (UB) — now "%llu";
 * unused local num_in_0 removed. */
void run_kmeans(void)
{
    unsigned long long t0, t1;
    /* Initial centroid guesses: one row per centroid, DIM coordinates each. */
    float c[][8] = {
        {1.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0},
        {1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
        {2.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
        {3.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
        {4.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
        {5.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
        {6.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
        {7.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0},
    };
    kmeans_config config;
    kmeans_result result;
    int i, dim;

    config.num_objs = ARRAY_LEN(dataset);
    config.k = 8;
    config.max_iterations = 1000;
    config.distance_method = d_distance;
    config.centroid_method = d_centroid;
    config.means_method = d_means;
    config.transpose_method = d_transpose;
    /* Work buffers are allocated inside kmeans(); NULL-init so the frees
     * below are safe even if kmeans() leaves some of them unallocated. */
    config.transpose_arr = NULL;
    config.distance_arr = NULL;
    config.distance_transpose_arr = NULL;
    config.mask_arr = NULL;
    config.clusters = malloc(config.num_objs * sizeof(int));
    if (config.clusters == NULL) {
        fprintf(stderr, "run_kmeans: out of memory\n");
        return;
    }
    config.centers = &c[0][0];
    config.objs = &dataset[0][0];

    /* run k-means */
    t0 = rdtsc();
    result = kmeans(&config);
    t1 = rdtsc();
    (void)result; /* NOTE(review): result is not checked — consider handling failure */

    /* Sanity check: each point must belong to exactly one cluster. */
    int fin_arr[DIM] = {0};
    for (i = 0; i < config.num_objs; i++) {
        int done = 0;
        for (int j = 0; j < DIM; j++) {
            if (config.mask_arr[i*DIM + j] == 1.0) {
                assert(done == 0);
                done = 1;
                fin_arr[j]++;
            }
        }
    }

    for (i = 0; i < config.k; i++) {
        dbg_printf("Centroid %d [", i);
        float *center = config.centers + i*DIM;
        for (dim = 0; dim < DIM; dim++) {
            dbg_printf("%f, ", center[dim]);
        }
        dbg_printf("]\n");
    }
    dbg_printf("Num in each :\n");
    int total = 0;
    for (int j = 0; j < DIM; j++) {
        total += fin_arr[j];
        dbg_printf("%d : %d\n", j, fin_arr[j]);
    }
    dbg_printf("Took %d iterations, cycles = %llu, total = %d\n",
               config.total_iterations, t1 - t0, total);

    free(config.transpose_arr);
    free(config.distance_arr);
    free(config.distance_transpose_arr);
    free(config.mask_arr);
    free(config.clusters);
}
/* Entry point: run the k-means example (once) and report total cycles.
 * Fix vs. original: "%ld" with an unsigned long long argument is undefined
 * behavior — replaced with "%llu"; explicit return added. */
int
main(int nargs, char **args)
{
    unsigned long long t0, t1;
    (void)nargs;
    (void)args;
    t0 = rdtsc();
    for (int i = 0; i < 1; i++) {
        printf("iteration %d\n", i);
        run_kmeans();
    }
    t1 = rdtsc();
    printf("Took cycles = %llu\n", t1 - t0);
    return 0;
}
|
trsm_x_csr_u_hi_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
/* Column-by-column backward substitution solving alpha*x = U*y for y, where
 * U is the strictly-upper-triangular part of CSR matrix A with an implied
 * unit diagonal: y[r] = alpha*x[r] - sum_{c>r} A[r,c]*y[c], swept from the
 * last row upward. x and y are dense multi-vectors stored column-major with
 * leading dimensions ldx and ldy; columns are processed in parallel. */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT rows = A->rows;
    const int threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
    for (ALPHA_INT col = 0; col < columns; col++) {
        /* Backward sweep: row r only depends on already-solved rows > r. */
        for (ALPHA_INT row = rows - 1; row >= 0; row--) {
            ALPHA_Number acc;
            alpha_setzero(acc);
            for (ALPHA_INT idx = A->rows_start[row]; idx < A->rows_end[row]; idx++) {
                const ALPHA_INT c = A->col_indx[idx];
                /* Only strictly-upper entries contribute. */
                if (c > row) {
                    alpha_madde(acc, A->values[idx], y[col * ldy + c]);
                }
            }
            ALPHA_Number rhs;
            alpha_setzero(rhs);
            alpha_mul(rhs, alpha, x[col * ldx + row]);
            alpha_sub(y[col * ldy + row], rhs, acc);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
graph_test.c | #include <stdlib.h>
#include <stdbool.h>
#if defined (_OPENMP)
#include <omp.h>
#endif
#include "../src/minunit.h"
#include "../src/graph.h"
graph_t g;
/* minunit fixture: allocate a fresh, empty graph before each test. */
void test_setup(void) {
g = g_init();
}
/* minunit fixture: release the graph after each test.
 * NOTE(review): assumes g_free also frees the metadata strings allocated
 * via __str_duplicate — TODO confirm ownership contract in graph.h. */
void test_teardown(void) {
g_free(g);
g = NULL;
}
/* private functions */
static char* __str_duplicate(const char* s);
static void __add_vertices(graph_t g, int num);
static void __add_edge(graph_t g, unsigned int src, unsigned int dest, int val);
/*******************************************************************************
* Test the setup
*******************************************************************************/
/* A freshly initialized graph must report zero vertices and zero edges. */
MU_TEST(test_default_setup) {
    mu_assert_int_eq(0, g_num_edges(g));
    mu_assert_int_eq(0, g_num_vertices(g));
}
/*******************************************************************************
* Test adding and removing vertices
*******************************************************************************/
/* Vertices get sequential ids starting at 0, and their metadata round-trips. */
MU_TEST(test_add_vertices) {
    mu_assert_int_eq(0, g_num_vertices(g));
    vertex_t v = g_vertex_add(g, __str_duplicate("this is a test"));
    mu_assert_int_eq(1, g_num_vertices(g));
    mu_assert_int_eq(0, g_vertex_id(v));
    mu_assert_string_eq("this is a test", (char*)g_vertex_metadata(v));
    g_vertex_add(g, __str_duplicate("d3-football"));
    g_vertex_add(g, __str_duplicate("college hoops"));
    mu_assert_int_eq(3, g_num_vertices(g));
    v = g_vertex_get(g, 1);
    mu_assert_int_eq(1, g_vertex_id(v));
    mu_assert_string_eq("d3-football", (char*)g_vertex_metadata(v));
}
/* Adding a vertex at an id that is already occupied must fail with NULL. */
MU_TEST(test_add_verticies_idx_error) {
    vertex_t v = g_vertex_add(g, __str_duplicate("we are here..."));
    v = g_vertex_add_alt(g, 0, NULL); // this should be an error: id 0 is taken by the add above
    mu_assert_null(v);
}
/* A sparse, very large vertex id forces internal growth but still counts as one vertex. */
MU_TEST(test_add_vertex_large_idx) {
    vertex_t v = g_vertex_add_alt(g, 4097, __str_duplicate("we are here..."));
    mu_assert_int_eq(1, g_num_vertices(g));
    mu_assert_int_eq(4097, g_vertex_id(v));
    vertex_t q = g_vertex_get(g, 4097);
    mu_assert_int_eq(4097, g_vertex_id(q));
}
/* Removing a vertex returns ownership to the caller, decrements the count,
   and a second removal of the same id yields NULL without side effects. */
MU_TEST(test_remove_vertices) {
    mu_assert_int_eq(0, g_num_vertices(g));
    g_vertex_add(g, __str_duplicate("this is a test"));
    g_vertex_add(g, __str_duplicate("d3-football"));
    g_vertex_add(g, __str_duplicate("college hoops"));
    mu_assert_int_eq(3, g_num_vertices(g));
    vertex_t v = g_vertex_remove(g, 0);
    mu_assert_int_eq(0, g_vertex_id(v));
    mu_assert_string_eq("this is a test", (char*)g_vertex_metadata(v));
    mu_assert_int_eq(2, g_num_vertices(g));
    g_vertex_free(v);
    v = g_vertex_remove(g, 2);
    mu_assert_int_eq(2, g_vertex_id(v));
    mu_assert_string_eq("college hoops", (char*)g_vertex_metadata(v));
    mu_assert_int_eq(1, g_num_vertices(g));
    g_vertex_free(v);
    /* check that something removed is clean! */
    v = g_vertex_remove(g, 0);
    mu_assert_null(v);
    mu_assert_int_eq(1, g_num_vertices(g)); /* it shouldn't change the num vertices */
}
/* Stress internal array growth with 4000 vertices (added in parallel when
   OpenMP is enabled) and verify every id is retrievable. */
MU_TEST(test_vertices_growth) {
    __add_vertices(g, 4000); /* add 4000 vertices! */
    unsigned int i;
    for (i = 0; i < 4000; i++) { /* good for checking order when using openmp; only "check" if would fail */
        vertex_t v = g_vertex_get(g, i);
        if (g_vertex_id(v) != i) {
            mu_assert_int_eq(g_vertex_id(v), i);
        }
    }
    mu_assert_int_eq(4000, g_num_vertices(g));
}
/* Metadata is stored by reference: mutating it and re-registering it must be
   visible on a later lookup. */
MU_TEST(test_updating_vertex_metadata) {
    __add_vertices(g, 5);
    vertex_t v = g_vertex_get(g, 0);
    int* metadata = (int*)g_vertex_metadata(v);
    mu_assert_int_eq(0, *metadata);
    *metadata = 255;
    g_vertex_metadata_update(v, metadata);
    v = g_vertex_get(g, 0);
    mu_assert_int_eq(255, *(int*)g_vertex_metadata(v));
}
/* Looking up an id that was never added must return NULL. */
MU_TEST(test_get_vertex_errors) {
    __add_vertices(g, 5);
    mu_assert_null(g_vertex_get(g, 10));
}
/*******************************************************************************
* Test adding and removing edges
*******************************************************************************/
/* Edges get sequential ids; src, dest, and metadata round-trip on lookup. */
MU_TEST(test_add_edges) {
    __add_vertices(g, 15);
    mu_assert_int_eq(15, g_num_vertices(g));
    g_edge_add(g, 0, 1, __str_duplicate("1"));
    g_edge_add(g, 0, 2, __str_duplicate("2"));
    g_edge_add(g, 0, 3, __str_duplicate("3"));
    g_edge_add(g, 0, 4, __str_duplicate("4"));
    g_edge_add(g, 0, 5, __str_duplicate("5"));
    g_edge_add(g, 0, 6, __str_duplicate("6"));
    mu_assert_int_eq(6, g_num_edges(g));
    edge_t e = g_edge_get(g, 5); /* sixth edge added -> id 5 */
    mu_assert_int_eq(0, g_edge_src(e));
    mu_assert_int_eq(6, g_edge_dest(e));
    mu_assert_string_eq("6", (char*)g_edge_metadata(e));
}
/* Removing an edge returns it (caller frees) and decrements the edge count. */
MU_TEST(test_remove_edges) {
    __add_vertices(g, 15);
    mu_assert_int_eq(15, g_num_vertices(g));
    g_edge_add(g, 0, 1, __str_duplicate("1"));
    g_edge_add(g, 0, 2, __str_duplicate("2"));
    g_edge_add(g, 0, 3, __str_duplicate("3"));
    g_edge_add(g, 0, 4, __str_duplicate("4"));
    g_edge_add(g, 0, 5, __str_duplicate("5"));
    g_edge_add(g, 0, 6, __str_duplicate("6"));
    mu_assert_int_eq(6, g_num_edges(g));
    edge_t e = g_edge_remove(g, 0);
    mu_assert_int_eq(0, g_edge_id(e));
    mu_assert_int_eq(0, g_edge_src(e));
    mu_assert_int_eq(1, g_edge_dest(e));
    mu_assert_int_eq(5, g_num_edges(g));
    g_edge_free(e);
}
MU_TEST(test_remove_edges_src) {
    /* this tests removing a vertex and ensuring that all the edges it was
        attached to are also removed */
    __add_vertices(g, 15);
    mu_assert_int_eq(15, g_num_vertices(g));
    g_edge_add(g, 0, 1, __str_duplicate("0-1"));
    g_edge_add(g, 0, 2, __str_duplicate("0-2"));
    g_edge_add(g, 0, 3, __str_duplicate("0-3"));
    g_edge_add(g, 0, 4, __str_duplicate("0-4"));
    g_edge_add(g, 0, 5, __str_duplicate("0-5"));
    g_edge_add(g, 0, 6, __str_duplicate("0-6"));
    g_edge_add(g, 14, 0, __str_duplicate("15-0")); /* NOTE(review): label says "15" but src id is 14 — presumably 1-based naming */
    /* not attached edges */
    g_edge_add(g, 1, 10, __str_duplicate("1-10"));
    mu_assert_int_eq(8, g_num_edges(g));
    vertex_t v = g_vertex_get(g, 0);
    mu_assert_int_eq(6, g_vertex_num_edges_out(v));
    mu_assert_int_eq(1, g_vertex_num_edges_in(v));
    v = g_vertex_remove(g, 0);
    mu_assert_int_eq(14, g_num_vertices(g));
    mu_assert_int_eq(1, g_num_edges(g)); /* only the 1->10 edge survives */
    g_vertex_free(v);
}
/* Stress internal edge-array growth with a 3999-edge chain over 4000 vertices. */
MU_TEST(test_edges_growth) {
    __add_vertices(g, 4000); /* add 4000 vertices! */
    mu_assert_int_eq(4000, g_num_vertices(g));
    unsigned int i;
    for (i = 0; i < 4000; i++) {
        vertex_t v = g_vertex_get(g, i);
        if (v == NULL)
            printf("Vertex id=%d == NULL\n", i);
        // else
        //     mu_assert_int_eq(i, g_vertex_id(v));
    }
    for (i = 1; i < g_num_vertices(g); i++) {
        edge_t e = g_edge_add(g, i - 1, i, NULL);
        if (e == NULL) {
            printf("e was NULL: %d\n", i);
        }
    }
    mu_assert_int_eq(3999, g_num_edges(g));
}
/* Edge metadata is stored by reference: in-place mutation plus update must be
   visible on the next read. */
MU_TEST(test_updating_edge_metadata) {
    __add_vertices(g, 15);
    g_edge_add(g, 0, 1, __str_duplicate("0-1"));
    g_edge_add(g, 0, 2, __str_duplicate("0-2"));
    g_edge_add(g, 0, 3, __str_duplicate("0-3"));
    edge_t e = g_edge_get(g, 0);
    char* metadata = (char*)g_edge_metadata(e);
    mu_assert_string_eq("0-1", metadata);
    metadata[0] = '-';
    g_edge_metadata_update(e, metadata);
    char* val = (char*)g_edge_metadata(e);
    mu_assert_string_eq("--1", val);
}
/* Edges to out-of-range or removed vertices must be rejected with NULL. */
MU_TEST(test_edge_add_error) {
    __add_vertices(g, 5); /* valid ids are 0..4 */
    edge_t e = g_edge_add(g, 0, 5, NULL); /* this should return NULL since dest is too large*/
    mu_assert_null(e);
    e = g_edge_add(g, 6, 0, NULL); /* src too large */
    mu_assert_null(e);
    /* now remove a vertex and try to add an edge to it */
    vertex_t v = g_vertex_remove(g, 0);
    g_vertex_free(v);
    e = g_edge_add(g, 0, 1, NULL);
    mu_assert_null(e);
    e = g_edge_add(g, 1, 0, NULL);
    mu_assert_null(e);
}
/* Removing a non-existent or already-removed edge id must return NULL. */
MU_TEST(test_edge_remove_error) {
    __add_vertices(g, 5);
    g_edge_add(g, 0, 4, NULL);
    g_edge_add(g, 1, 4, NULL);
    g_edge_add(g, 2, 4, NULL);
    g_edge_add(g, 3, 4, NULL);
    edge_t e = g_edge_remove(g, 5); /* only ids 0..3 exist */
    mu_assert_null(e);
    e = g_edge_remove(g, 3); /* this should be fine */
    g_edge_free(e);
    e = g_edge_remove(g, 3); /* now we should get a NULL back */
    mu_assert_null(e);
}
/* Looking up an edge id that was never assigned must return NULL. */
MU_TEST(test_edge_get_error) {
    __add_vertices(g, 5);
    g_edge_add(g, 0, 4, NULL); /* only edge id 0 exists */
    mu_assert_null(g_edge_get(g, 2));
}
/*******************************************************************************
* Test iterating over the vertices
*******************************************************************************/
/* The vertex iterator visits every vertex; metadata (its own id) matches the index. */
MU_TEST(test_iterate_vertices_all_there) {
    __add_vertices(g, 5);
    unsigned int i;
    vertex_t v;
    g_iterate_vertices(g,v,i) {
        mu_assert_int_eq(i, *(int*)g_vertex_metadata(v));
    }
}
/* The vertex iterator skips removed slots and still pairs each vertex with
   the right iteration index. */
MU_TEST(test_iterate_vertices_some_removed) {
    __add_vertices(g, 5);
    vertex_t t = g_vertex_remove(g, 2);
    g_vertex_free(t);
    unsigned int i;
    vertex_t v;
    g_iterate_vertices(g,v,i) {
        mu_assert_int_eq(i, *(int*)g_vertex_metadata(v));
    }
}
/* The per-vertex edge iterator visits all outgoing edges exactly once. */
MU_TEST(test_iterate_edges) {
    __add_vertices(g, 5);
    __add_edge(g, 0, 1, 0);
    __add_edge(g, 0, 2, 1);
    __add_edge(g, 0, 3, 2);
    __add_edge(g, 0, 4, 3);
    unsigned int i, j = 0;
    edge_t e;
    vertex_t v = g_vertex_get(g, 0);
    g_iterate_edges(v, e, i) {
        mu_assert_int_eq(i, *(int*)g_edge_metadata(e));
        ++j;
    }
    mu_assert_int_eq(4, j);
}
/* Force the per-vertex edge list to grow past its initial capacity. */
MU_TEST(test_iterate_edges_large) {
    __add_vertices(g, 5);
    unsigned int i, j = 0;
    for (i = 0; i < 65; i++) { /* add 65 edges from the same source vertex */
        __add_edge(g, 0, i % 5, i);
    }
    mu_assert_int_eq(65, g_num_edges(g));
    edge_t e;
    vertex_t v = g_vertex_get(g, 0);
    g_iterate_edges(v, e, i) {
        // mu_assert_int_eq(i, *(int*)g_edge_metadata(e));
        ++j;
    }
    mu_assert_int_eq(65, j);
}
/* After removing two of four edges, iteration yields only the survivors. */
MU_TEST(test_iterate_edges_some_removed) {
    __add_vertices(g, 5);
    __add_edge(g, 0, 1, 0);
    __add_edge(g, 0, 2, 1);
    __add_edge(g, 0, 3, 2);
    __add_edge(g, 0, 4, 3);
    edge_t t = g_edge_remove(g, 1);
    g_edge_free(t);
    t = g_edge_remove(g, 2);
    g_edge_free(t);
    unsigned int i, j = 0;
    edge_t e;
    vertex_t v = g_vertex_get(g, 0);
    /* weird test, but should be true since we should have vals 0 and 3 left:
       iteration index i is 0,1 and metadata is 0,3 so metadata == i*3 */
    g_iterate_edges(v, e, i) {
        mu_assert_int_eq(i * 3, *(int*)g_edge_metadata(e));
        ++j;
    }
    mu_assert_int_eq(2, j);
}
/* Removed edge slots can be reused: after removing two edges and adding one
   back, iteration sees exactly the three surviving edges. */
MU_TEST(test_iterate_edges_some_removed_add_back) {
    __add_vertices(g, 5);
    __add_edge(g, 0, 1, 0);
    __add_edge(g, 0, 2, 1);
    __add_edge(g, 0, 3, 2);
    __add_edge(g, 0, 4, 3);
    edge_t t = g_edge_remove(g, 1);
    g_edge_free(t);
    t = g_edge_remove(g, 2);
    g_edge_free(t);
    __add_edge(g, 0, 4, 6); /* this function just turns the last int into a pointer for metadata */
    unsigned int i, j = 0;
    edge_t e;
    vertex_t v = g_vertex_get(g, 0);
    /* weird test, but should be true since we should have vals 0, 3, and 6 left (metadata == i*3) */
    g_iterate_edges(v, e, i) {
        mu_assert_int_eq(i * 3, *(int*)g_edge_metadata(e));
        ++j;
    }
    mu_assert_int_eq(3, j);
}
/* Asking a vertex for an out-edge slot it doesn't have must return NULL. */
MU_TEST(test_g_vertex_edge_error) {
    __add_vertices(g, 5);
    __add_edge(g, 0, 1, 0); /* vertex 0 has exactly one out-edge (slot 0) */
    vertex_t v = g_vertex_get(g, 0);
    mu_assert_null(g_vertex_edge(v, 1));
    mu_assert_null(g_vertex_edge(v, 17));
}
/*******************************************************************************
* Test iterating with breadth and depth
*******************************************************************************/
/* BFS from vertex 0 must visit reachable vertices level by level, in the
   order edges were added, without revisiting (9->1 closes a cycle). */
MU_TEST(test_g_breadth_first_traverse) {
    __add_vertices(g, 15);
    __add_edge(g, 0, 1, 0);
    __add_edge(g, 0, 3, 0);
    __add_edge(g, 0, 2, 0);
    __add_edge(g, 1, 4, 0);
    __add_edge(g, 1, 5, 0);
    __add_edge(g, 2, 9, 0);
    __add_edge(g, 3, 10, 0);
    __add_edge(g, 10, 6, 0);
    __add_edge(g, 4, 8, 0);
    __add_edge(g, 4, 12, 0);
    __add_edge(g, 6, 14, 0);
    __add_edge(g, 9, 13, 0);
    __add_edge(g, 9, 1, 0);
    int answers[] = {0, 1, 3, 2, 4, 5, 10, 9, 8, 12, 6, 13, 14};
    unsigned int len, i;
    unsigned int* res = g_breadth_first_traverse(g, g_vertex_get(g, 0), &len);
    mu_assert_int_eq(13, len); /* vertices 7 and 11 are unreachable */
    for (i = 0; i < len; i++) {
        mu_assert_int_eq(answers[i], res[i]);
    }
    free(res);
}
/* DFS from vertex 0 on the same graph as the BFS test: depth-first order,
   no revisits, vertices 7 and 11 unreachable. */
MU_TEST(test_g_depth_first_traverse) {
    __add_vertices(g, 15);
    __add_edge(g, 0, 1, 0);
    __add_edge(g, 0, 3, 0);
    __add_edge(g, 0, 2, 0);
    __add_edge(g, 1, 4, 0);
    __add_edge(g, 1, 5, 0);
    __add_edge(g, 2, 9, 0);
    __add_edge(g, 3, 10, 0);
    __add_edge(g, 10, 6, 0);
    __add_edge(g, 4, 8, 0);
    __add_edge(g, 4, 12, 0);
    __add_edge(g, 6, 14, 0);
    __add_edge(g, 9, 13, 0);
    __add_edge(g, 9, 1, 0);
    int answers[] = {0, 1, 4, 8, 12, 5, 3, 10, 6, 14, 2, 9, 13};
    unsigned int len, i;
    unsigned int* res = g_depth_first_traverse(g, g_vertex_get(g, 0), &len);
    mu_assert_int_eq(13, len);
    for (i = 0; i < len; i++) {
        mu_assert_int_eq(answers[i], res[i]);
    }
    free(res);
}
/*******************************************************************************
* Test Suite Setup
*******************************************************************************/
/* Register setup/teardown and run every test in a fixed order. */
MU_TEST_SUITE(test_suite) {
    MU_SUITE_CONFIGURE(&test_setup, &test_teardown);
    MU_RUN_TEST(test_default_setup);
    /* add & remove vertices */
    MU_RUN_TEST(test_add_vertices);
    MU_RUN_TEST(test_add_verticies_idx_error);
    MU_RUN_TEST(test_add_vertex_large_idx);
    MU_RUN_TEST(test_remove_vertices);
    MU_RUN_TEST(test_vertices_growth);
    MU_RUN_TEST(test_updating_vertex_metadata);
    MU_RUN_TEST(test_get_vertex_errors);
    /* add & remove edges */
    MU_RUN_TEST(test_add_edges);
    MU_RUN_TEST(test_remove_edges);
    MU_RUN_TEST(test_remove_edges_src);
    MU_RUN_TEST(test_edges_growth);
    MU_RUN_TEST(test_edge_add_error);
    MU_RUN_TEST(test_edge_remove_error);
    MU_RUN_TEST(test_edge_get_error);
    MU_RUN_TEST(test_g_vertex_edge_error);
    /* Iteration tests */
    MU_RUN_TEST(test_iterate_vertices_all_there);
    MU_RUN_TEST(test_iterate_vertices_some_removed);
    MU_RUN_TEST(test_iterate_edges);
    MU_RUN_TEST(test_iterate_edges_large);
    MU_RUN_TEST(test_updating_edge_metadata);
    MU_RUN_TEST(test_iterate_edges_some_removed);
    MU_RUN_TEST(test_iterate_edges_some_removed_add_back);
    /* Traversals */
    MU_RUN_TEST(test_g_breadth_first_traverse);
    MU_RUN_TEST(test_g_depth_first_traverse);
}
/* Run the suite, print a summary, and exit with the failure count
   (0 == all tests passed). */
int main() {
    MU_RUN_SUITE(test_suite);
    MU_REPORT();
    printf("Number failed tests: %d\n", minunit_fail);
    return minunit_fail;
}
/* Private Functions */
/* Return a heap-allocated copy of the NUL-terminated string s, or NULL when
   s is NULL or allocation fails.  The caller owns the result and must
   free() it.  (NOTE: identifiers starting with double underscore are
   technically reserved; kept for compatibility with existing callers.) */
static char* __str_duplicate(const char* s) {
    if (s == NULL)
        return NULL;
    size_t len = strlen(s);
    char* buf = (char*)malloc(len + 1);
    if (buf == NULL)
        return NULL;
    /* memcpy of len+1 bytes copies the terminator too; the original's extra
       buf[len] = '\0' after strcpy was redundant. */
    memcpy(buf, s, len + 1);
    return buf;
}
/* Add NUM vertices with ids 0..num-1 to G; each vertex's metadata is a
   heap-allocated int holding its own id (freed by the graph teardown).
   Runs in parallel when OpenMP is enabled — g_vertex_add_alt is presumably
   safe for concurrent inserts at distinct ids (TODO confirm). */
static void __add_vertices(graph_t g, int num) {
    int i;
    #pragma omp parallel for
    for (i = 0; i < num; i++) {
        int* q = (int*)calloc(1, sizeof(int));
        if (q == NULL)  /* original dereferenced an unchecked calloc result */
            continue;
        *q = i;
        g_vertex_add_alt(g, i, q);
    }
}
/* Add an edge src->dest whose metadata is a heap-allocated int holding VAL
   (ownership passes to the graph).  Silently skips the edge if the tiny
   allocation fails — the original dereferenced the unchecked pointer. */
static void __add_edge(graph_t g, unsigned int src, unsigned int dest, int val) {
    int* q = (int*)calloc(1, sizeof(int));
    if (q == NULL)
        return;
    *q = val;
    g_edge_add(g, src, dest, q);
}
|
GB_unaryop__abs_int8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int8_uint32
// op(A') function: GB_tran__abs_int8_uint32
// C type: int8_t
// A type: uint32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Apply Cx [p] = GB_IABS ((int8_t) Ax [p]) for all p < anz (cast first, then
   the abs operator, per GB_CAST_OP above).  anz is the number of entries;
   nthreads the OpenMP thread count.  Returns GrB_NO_VALUE when this kernel
   is compiled out (GB_DISABLE).  Auto-generated: do not hand-edit logic. */
GrB_Info GB_unop__abs_int8_uint32
(
    int8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* each entry is independent, so a static schedule balances perfectly */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast uint32_t -> int8_t, and apply
   GB_IABS, sliced over naslice tasks.  The loop body is the shared template
   GB_unaryop_transpose.c, specialized via the GB_* macros defined above.
   Returns GrB_NO_VALUE when compiled out.  Auto-generated: do not hand-edit. */
GrB_Info GB_tran__abs_int8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ast-dump-openmp-begin-declare-variant_addr_1.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
int also_before(void) { // base version; a variant for implementation={vendor(llvm)} follows below (CHECK lines depend on exact line/column positions)
  return 0;
}
#pragma omp begin declare variant match(implementation={vendor(llvm)})
int also_after(void) { // variant body, inside the begin/end declare variant region
  return 1;
}
int also_before(void) { // variant of also_before for the llvm vendor context
  return 2;
}
#pragma omp end declare variant
int also_after(void) { // base version, declared after the variant region
  return 0;
}
int test(int (*fd)(void)) { // indirect call: variant substitution must not apply through the pointer
  return fd();
}
int main() {
  // Should return 0.
  return test(also_after) +    // both plain references and explicit
         test(also_before) +   // address-of must resolve to the base
         test(&also_after) +   // versions, each returning 0
         test(&also_before);
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:24:1> line:22:5 used test 'int (int (*)({{.*}}))'
// CHECK-NEXT: | |-ParmVarDecl [[ADDR_23:0x[a-z0-9]*]] <col:10, col:24> col:16 used fd 'int (*)({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_24:0x[a-z0-9]*]] <col:27, line:24:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_25:0x[a-z0-9]*]] <line:23:3, col:13>
// CHECK-NEXT: | `-CallExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:13> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <LValueToRValue>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' {{.*}}ParmVar [[ADDR_23]] 'fd' 'int (*)({{.*}})'
// CHECK-NEXT: `-FunctionDecl [[ADDR_29:0x[a-z0-9]*]] <line:25:1, line:31:1> line:25:5 main 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_30:0x[a-z0-9]*]] <col:12, line:31:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_31:0x[a-z0-9]*]] <line:27:3, line:30:27>
// CHECK-NEXT: `-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:10, line:30:27> 'int' '+'
// CHECK-NEXT: |-BinaryOperator [[ADDR_33:0x[a-z0-9]*]] <line:27:10, line:29:26> 'int' '+'
// CHECK-NEXT: | |-BinaryOperator [[ADDR_34:0x[a-z0-9]*]] <line:27:10, line:28:26> 'int' '+'
// CHECK-NEXT: | | |-CallExpr [[ADDR_35:0x[a-z0-9]*]] <line:27:10, col:25> 'int'
// CHECK-NEXT: | | | |-ImplicitCastExpr [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay>
// CHECK-NEXT: | | | | `-DeclRefExpr [[ADDR_37:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))'
// CHECK-NEXT: | | | `-ImplicitCastExpr [[ADDR_38:0x[a-z0-9]*]] <col:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_39:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: | | `-CallExpr [[ADDR_40:0x[a-z0-9]*]] <line:28:10, col:26> 'int'
// CHECK-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay>
// CHECK-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))'
// CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_43:0x[a-z0-9]*]] <col:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_44:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CHECK-NEXT: | `-CallExpr [[ADDR_45:0x[a-z0-9]*]] <line:29:10, col:26> 'int'
// CHECK-NEXT: | |-ImplicitCastExpr [[ADDR_46:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay>
// CHECK-NEXT: | | `-DeclRefExpr [[ADDR_47:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))'
// CHECK-NEXT: | `-UnaryOperator [[ADDR_48:0x[a-z0-9]*]] <col:15, col:16> 'int (*)({{.*}})' prefix '&' cannot overflow
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_49:0x[a-z0-9]*]] <col:16> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_50:0x[a-z0-9]*]] <line:30:10, col:27> 'int'
// CHECK-NEXT: |-ImplicitCastExpr [[ADDR_51:0x[a-z0-9]*]] <col:10> 'int (*)(int (*)({{.*}}))' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_52:0x[a-z0-9]*]] <col:10> 'int (int (*)({{.*}}))' {{.*}}Function [[ADDR_22]] 'test' 'int (int (*)({{.*}}))'
// CHECK-NEXT: `-UnaryOperator [[ADDR_53:0x[a-z0-9]*]] <col:15, col:16> 'int (*)({{.*}})' prefix '&' cannot overflow
// CHECK-NEXT: `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:16> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
|
general_basis_get_amp.h | #ifndef _GENERAL_BASIS_GET_AMP_H
#define _GENERAL_BASIS_GET_AMP_H
//#include <limits>
#include "general_basis_core.h"
#include "numpy/ndarraytypes.h"
#include "misc.h"
#include "openmp.h"
//#include <complex>
namespace basis_general {
template<class I,class P=signed char>
std::complex<double> get_amp_rep(general_basis_core<I,P> *B,
const int nt,
I r, // start out with representative state and iterate over all transofmrations.
const I s, // target states to find amplitude of
double k = 0.0,
P sign = 1,
const int depth = 0
)
{
if(nt<=0){
return 1.0;
}
std::complex<double> phase_factor = 0.0;
const int per = B->pers[depth];
const double q = (2.0*M_PI*B->qs[depth])/per;
if(depth < nt-1){
for(int j=0;j<per;j++){
phase_factor += get_amp_rep(B,nt,r,s,k,sign,depth+1);
k += q;
r = B->map_state(r,depth,sign);
}
}
else{
for(int j=0;j<per;j++){
if(r==s){
phase_factor += double(sign)*std::exp(std::complex<double>(0,-k));
}
k += q;
r = B->map_state(r,depth,sign);
}
}
return phase_factor;
}
template<class I,class J,class P=signed char>
int get_amp_general(general_basis_core<I,P> *B,
I s[], // input states in the full basis
J out[], // state amplitudes of state s (full basis)
const npy_intp Ns // length of above arrays (should be the same)
)
{
int err=0;
double per_factor = 1.0;
int q_sum = 0; // sum of quantum numbers
const int nt = B->get_nt();
for(int i=0;i<nt;i++){
per_factor *= B->pers[i];
q_sum += std::abs(B->qs[i]);
}
const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload
if(q_sum > 0 || B->fermionic){ // a non-zero quantum number, or fermionic basis => need a nontrivial phase_factor
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
int g[__GENERAL_BASIS_CORE__max_nt];
P sign=1;
I ss=s[i];
I r = B->ref_state(ss,g,sign);
double norm_r = B->check_state(r);
s[i] = r; // update state with representative
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
phase_factor = get_amp_rep(B,nt,r,ss);
out_tmp = phase_factor/std::sqrt(norm_r * per_factor);
}
else{
out_tmp = 0.0;
}
int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
else{
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
int g[__GENERAL_BASIS_CORE__max_nt];
P sign=1;
I ss=s[i];
I r = B->ref_state(ss,g,sign);
double norm_r = B->check_state(r);
s[i] = r; // update state with representative
if(!check_nan(norm_r) && norm_r > 0){ // ref_state is a representative
//phase_factor = get_amp_rep(B,nt,r,ss);
out_tmp = std::sqrt(norm_r/per_factor);
}
else{
out_tmp = 0.0;
}
int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
return err;
}
// same as get_amp_rep, but w/o calling ref_state and check_state
template<class I,class J,class P=signed char>
int get_amp_general_light(general_basis_core<I,P> *B,
I s[], // input states in the symmetry-reduced basis
J out[], // state amplitudes of state s (symmetry-reduced basis)
const npy_intp Ns // length of above arrays (should be the same)
)
{
int err=0;
double per_factor = 1.0;
int q_sum = 0; // sum of quantum numbers
const int nt = B->get_nt();
for(int i=0;i<nt;i++){
per_factor *= B->pers[i];
q_sum += std::abs(B->qs[i]);
}
const npy_intp chunk = std::max(Ns/(100*omp_get_max_threads()),(npy_intp)1); // check_state has variable workload
if(q_sum > 0 || B->fermionic){ // a non-zero quantum number, or fermionic basis => need a nontrivial phase_factor
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
I ss=s[i];
double norm_r = B->check_state(ss);
phase_factor = get_amp_rep(B,nt,ss,ss);
out_tmp = phase_factor/std::sqrt(norm_r * per_factor);
int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
else{
#pragma omp parallel for schedule(dynamic,chunk)
for(npy_intp i=0;i<Ns;i++){
if(err == 0){
std::complex<double> phase_factor, out_tmp;
double norm_r = B->check_state(s[i]);
out_tmp = std::sqrt(norm_r/per_factor);
int local_err = check_imag(out_tmp, &out[i]); // compute and assign amplitude in full basis
if(local_err){
#pragma omp critical
err = local_err;
}
}
}
}
return err;
}
}
#endif
|
region2.c | #include <omp.h>
#include <assert.h>
#define N 10
/* Six parallel regions exercising different placements of #pragma omp
   barrier relative to divergent branches.
   NOTE(review): `i` is read before it is ever assigned, `a` is unused, and
   `sum`/`i` are written by all threads without synchronization; barriers
   inside `if`/`else` branches violate the OpenMP requirement that a barrier
   be encountered by every thread of the team.  This looks intentional — a
   fixture for a barrier-matching / race-analysis tool — so the defects are
   documented rather than "fixed".  Confirm before changing. */
int main (int argc, char * argv[]){
    int i;
    int a[N];
    int sum = 0;

    /* Region 1: barriers before, inside both branches, and after. */
    #pragma omp parallel
    {
        #pragma omp barrier
        sum = 0;
        #pragma omp barrier
        if (i > 0) {
            #pragma omp barrier
            sum = sum + 1;
        } else {
            #pragma omp barrier
            sum = N;
        }
        #pragma omp barrier
        i = sum;
    }

    /* Region 2: no barriers at all. */
    #pragma omp parallel
    {
        sum = 0;
        if (i > 0) {
            sum = sum + 1;
        } else {
            sum = N;
        }
        i = sum;
    }

    /* Region 3: barriers only inside the divergent branches. */
    #pragma omp parallel
    {
        sum = 0;
        if (i > 0) {
            #pragma omp barrier
            sum = sum + 1;
        } else {
            #pragma omp barrier
            sum = N;
        }
        i = sum;
    }

    /* Region 4: barriers only outside the branch. */
    #pragma omp parallel
    {
        #pragma omp barrier
        sum = 0;
        if (i > 0) {
            sum = sum + 1;
        } else {
            sum = N;
        }
        #pragma omp barrier
        i = sum;
    }

    /* Region 5: barrier in only one branch (mismatched across threads). */
    #pragma omp parallel
    {
        sum = 0;
        #pragma omp barrier
        if (i > 0) {
            sum = sum + 1;
        } else {
            #pragma omp barrier
            sum = N;
        }
        #pragma omp barrier
        i = sum;
    }

    /* Region 6: barrier in the `then` branch only. */
    #pragma omp parallel
    {
        #pragma omp barrier
        sum = 0;
        #pragma omp barrier
        if (i > 0) {
            #pragma omp barrier
            sum = sum + 1;
        } else {
            sum = N;
        }
        i = sum;
    }
}
|
fftw++.h | /* Fast Fourier transform C++ header class for the FFTW3 Library
Copyright (C) 2004-16
John C. Bowman, University of Alberta
Malcolm Roberts, University of Strasbourg
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#ifndef __fftwpp_h__
#define __fftwpp_h__ 1
#define __FFTWPP_H_VERSION__ 2.05
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <fftw3.h>
#include <cerrno>
#include <map>
#ifndef _OPENMP
#ifndef FFTWPP_SINGLE_THREAD
#define FFTWPP_SINGLE_THREAD
#endif
#endif
#ifndef FFTWPP_SINGLE_THREAD
#include <omp.h>
#endif
// Return the calling thread's OpenMP thread id, or 0 when built
// single-threaded (FFTWPP_SINGLE_THREAD).
inline int get_thread_num()
{
#ifdef FFTWPP_SINGLE_THREAD
  return 0;
#else
  return omp_get_thread_num();
#endif
}
// Return the maximum number of OpenMP threads available, or 1 when built
// single-threaded (FFTWPP_SINGLE_THREAD).
inline int get_max_threads()
{
#ifdef FFTWPP_SINGLE_THREAD
  return 1;
#else
  return omp_get_max_threads();
#endif
}
#ifndef FFTWPP_SINGLE_THREAD
#define PARALLEL(code) \
if(threads > 1) { \
_Pragma("omp parallel for num_threads(threads)") \
code \
} else { \
code \
}
#else
#define PARALLEL(code) \
{ \
code \
}
#endif
#ifndef __Complex_h__
#include <complex>
typedef std::complex<double> Complex;
#endif
#include "seconds.h"
#include "statistics.h"
#include "align.h"
namespace fftwpp {
// Obsolete names:
#define FFTWComplex ComplexAlign
#define FFTWdouble doubleAlign
#define FFTWdelete deleteAlign
class fftw;
extern "C" fftw_plan Planner(fftw *F, Complex *in, Complex *out);
void LoadWisdom();
void SaveWisdom();
extern const char *inout;
// Timing statistics recorded for a candidate thread count during plan
// benchmarking (mean/stdev presumably in seconds — confirm against seconds.h).
struct threaddata {
  unsigned int threads; // thread count the measurement was taken with
  double mean;          // mean execution time
  double stdev;         // standard deviation of the execution time
  threaddata() : threads(0), mean(0.0), stdev(0.0) {}
  threaddata(unsigned int threads, double mean, double stdev) :
    threads(threads), mean(mean), stdev(stdev) {}
};
class fftw;
// Mixin holding the thread budget of a transform and splitting it between
// an outer loop and nested (inner) transforms.
class ThreadBase
{
protected:
  unsigned int threads;      // threads used by this (outer) transform
  unsigned int innerthreads; // threads delegated to nested transforms
public:
  ThreadBase(); // defined out of line (presumably defaults from fftw::maxthreads — confirm)
  ThreadBase(unsigned int threads) : threads(threads) {}
  void Threads(unsigned int nthreads) {threads=nthreads;}
  unsigned int Threads() {return threads;}

  // Decide where to parallelize a loop of nx independent transforms: with at
  // least as many iterations as threads, parallelize the outer loop
  // (inner transforms run serially); otherwise run the outer loop serially
  // and give all threads to the inner transforms.
  void multithread(unsigned int nx) {
    if(nx >= threads) {
      innerthreads=1;
    } else {
      innerthreads=threads;
      threads=1;
    }
  }
};
// Number of real (double) slots required for the real side of an n-point
// real-to-complex transform: an in-place transform (out omitted/NULL, or
// out == in) must use the padded complex layout of 2*(n/2+1) values, while
// a genuine out-of-place transform needs exactly n.
inline unsigned int realsize(unsigned int n, Complex *in, Complex *out=NULL)
{
  bool inplace = (out == NULL) || (in == out);
  if(inplace)
    return 2*(n/2+1);
  return n;
}
// double*-output overload: forwards to the Complex* version above.
inline unsigned int realsize(unsigned int n, Complex *in, double *out)
{
  return realsize(n,in,(Complex *) out);
}
// double*-input overload: forwards to the Complex* version above.
inline unsigned int realsize(unsigned int n, double *in, Complex *out)
{
  return realsize(n,(Complex *) in,out);
}
// Base clase for fft routines
//
class fftw : public ThreadBase {
protected:
unsigned int doubles; // number of double precision values in dataset
int sign;
unsigned int threads;
double norm;
fftw_plan plan;
bool inplace;
  // Resolve an FFTW "dist" parameter: an explicit nonzero dist wins;
  // otherwise contiguous data (stride 1) are spaced n apart, and strided
  // (interleaved) data use dist 1.
  unsigned int Dist(unsigned int n, size_t stride, size_t dist) {
    return dist ? dist : ((stride == 1) ? n : 1);
  }
static const double twopi;
public:
static unsigned int effort;
static unsigned int maxthreads;
static double testseconds;
static const char *WisdomName;
static fftw_plan (*planner)(fftw *f, Complex *in, Complex *out);
virtual unsigned int Threads() {return threads;}
static const char *oddshift;
  // Inplace shift of Fourier origin to (nx/2,0) for even nx.
  // Implemented by multiplying row i by (-1)^i in the r2c half-complex
  // layout (nyp = ny/2+1 complex values per row): stepping by 2*nyp and
  // starting at the second row negates exactly the odd-numbered rows.
  static void Shift(Complex *data, unsigned int nx, unsigned int ny,
                    unsigned int threads) {
    unsigned int nyp=ny/2+1;
    unsigned int stop=nx*nyp;
    if(nx % 2 == 0) {
      unsigned int inc=2*nyp; // skip even rows; only odd rows change sign
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=nyp; i < stop; i += inc) {
        Complex *p=data+i;
        for(unsigned int j=0; j < nyp; j++) p[j]=-p[j];
      }
    } else {
      std::cerr << oddshift << std::endl;
      exit(1);
    }
  }
// Out-of-place shift of Fourier origin to (nx/2,0) for even nx.
static void Shift(double *data, unsigned int nx, unsigned int ny,
unsigned int threads) {
if(nx % 2 == 0) {
unsigned int stop=nx*ny;
unsigned int inc=2*ny;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=ny; i < stop; i += inc) {
double *p=data+i;
for(unsigned int j=0; j < ny; j++) p[j]=-p[j];
}
} else {
std::cerr << oddshift << std::endl;
exit(1);
}
}
// Inplace shift of Fourier origin to (nx/2,ny/2,0) for even nx and ny.
static void Shift(Complex *data, unsigned int nx, unsigned int ny,
unsigned int nz, unsigned int threads) {
unsigned int nzp=nz/2+1;
unsigned int nyzp=ny*nzp;
if(nx % 2 == 0 && ny % 2 == 0) {
unsigned int pinc=2*nzp;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < nx; i++) {
Complex *pstart=data+i*nyzp;
Complex *pstop=pstart+nyzp;
for(Complex *p=pstart+(1-(i % 2))*nzp; p < pstop; p += pinc) {
for(unsigned int k=0; k < nzp; k++) p[k]=-p[k];
}
}
} else {
std::cerr << oddshift << " or odd ny" << std::endl;
exit(1);
}
}
// Out-of-place shift of Fourier origin to (nx/2,ny/2,0) for even nx and ny.
static void Shift(double *data, unsigned int nx, unsigned int ny,
unsigned int nz, unsigned int threads) {
unsigned int nyz=ny*nz;
if(nx % 2 == 0 && ny % 2 == 0) {
unsigned int pinc=2*nz;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < nx; i++) {
double *pstart=data+i*nyz;
double *pstop=pstart+nyz;
for(double *p=pstart+(1-(i % 2))*nz; p < pstop; p += pinc) {
for(unsigned int k=0; k < nz; k++) p[k]=-p[k];
}
}
} else {
std::cerr << oddshift << " or odd ny" << std::endl;
exit(1);
}
}
fftw() : plan(NULL) {}
fftw(unsigned int doubles, int sign, unsigned int threads,
unsigned int n=0) :
doubles(doubles), sign(sign), threads(threads),
norm(1.0/(n ? n : doubles/2)), plan(NULL) {
#ifndef FFTWPP_SINGLE_THREAD
fftw_init_threads();
#endif
}
virtual ~fftw() {
if(plan) fftw_destroy_plan(plan);
}
virtual fftw_plan Plan(Complex *in, Complex *out) {return NULL;};
inline void CheckAlign(Complex *p, const char *s) {
if((size_t) p % sizeof(Complex) == 0) return;
std::cerr << "WARNING: " << s << " array is not " << sizeof(Complex)
<< "-byte aligned: address " << p << std::endl;
}
void noplan() {
std::cerr << "Unable to construct FFTW plan" << std::endl;
exit(1);
}
static void planThreads(unsigned int threads) {
#ifndef FFTWPP_SINGLE_THREAD
omp_set_num_threads(threads);
fftw_plan_with_nthreads(threads);
#endif
}
threaddata time(fftw_plan plan1, fftw_plan planT, Complex *in, Complex *out,
unsigned int Threads) {
utils::statistics S,ST;
double stop=utils::totalseconds()+testseconds;
threads=1;
plan=plan1;
fft(in,out);
threads=Threads;
plan=planT;
fft(in,out);
unsigned int N=1;
for(;;) {
double t0=utils::totalseconds();
threads=1;
plan=plan1;
for(unsigned int i=0; i < N; ++i)
fft(in,out);
double t1=utils::totalseconds();
threads=Threads;
plan=planT;
for(unsigned int i=0; i < N; ++i)
fft(in,out);
double t=utils::totalseconds();
S.add(t1-t0);
ST.add(t-t1);
if(S.mean() < 100.0/CLOCKS_PER_SEC) N *= 2;
if(S.count() >= 10) {
double error=S.stdev();
double diff=ST.mean()-S.mean();
if(diff >= 0.0 || t > stop) {
threads=1;
plan=plan1;
fftw_destroy_plan(planT);
break;
}
if(diff < -error) {
threads=Threads;
fftw_destroy_plan(plan1);
break;
}
}
}
return threaddata(threads,S.mean(),S.stdev());
}
virtual threaddata lookup(bool inplace, unsigned int threads) {
return threaddata();
}
virtual void store(bool inplace, const threaddata& data) {}
inline Complex *CheckAlign(Complex *in, Complex *out, bool constructor=true)
{
#ifndef NO_CHECK_ALIGN
CheckAlign(in,constructor ? "constructor input" : "input");
if(out) CheckAlign(out,constructor ? "constructor output" : "output");
else out=in;
#else
if(!out) out=in;
#endif
return out;
}
threaddata Setup(Complex *in, Complex *out=NULL) {
bool alloc=!in;
if(alloc) in=utils::ComplexAlign((doubles+1)/2);
out=CheckAlign(in,out);
inplace=(out==in);
threaddata data;
unsigned int Threads=threads;
if(threads > 1) data=lookup(inplace,threads);
threads=data.threads > 0 ? data.threads : 1;
planThreads(threads);
plan=(*planner)(this,in,out);
if(!plan) noplan();
fftw_plan planT;
if(fftw::maxthreads > 1) {
threads=Threads;
planThreads(threads);
planT=(*planner)(this,in,out);
if(data.threads == 0) {
if(planT)
data=time(plan,planT,in,out,threads);
else noplan();
store(inplace,threaddata(threads,data.mean,data.stdev));
}
}
if(alloc) Array::deleteAlign(in,(doubles+1)/2);
return data;
}
threaddata Setup(Complex *in, double *out) {
return Setup(in,(Complex *) out);
}
threaddata Setup(double *in, Complex *out=NULL) {
return Setup((Complex *) in,out);
}
virtual void Execute(Complex *in, Complex *out, bool=false) {
fftw_execute_dft(plan,(fftw_complex *) in,(fftw_complex *) out);
}
Complex *Setout(Complex *in, Complex *out) {
out=CheckAlign(in,out,false);
if(inplace ^ (out == in)) {
std::cerr << "ERROR: fft " << inout << std::endl;
exit(1);
}
return out;
}
void fft(Complex *in, Complex *out=NULL) {
out=Setout(in,out);
Execute(in,out);
}
void fft(double *in, Complex *out=NULL) {
fft((Complex *) in,out);
}
void fft(Complex *in, double *out) {
fft(in,(Complex *) out);
}
void fft0(Complex *in, Complex *out=NULL) {
out=Setout(in,out);
Execute(in,out,true);
}
void fft0(double *in, Complex *out=NULL) {
fft0((Complex *) in,out);
}
void fft0(Complex *in, double *out) {
fft0(in,(Complex *) out);
}
void Normalize(Complex *out) {
unsigned int stop=doubles/2;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < stop; i++) out[i] *= norm;
}
void Normalize(double *out) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < doubles; i++) out[i] *= norm;
}
virtual void fftNormalized(Complex *in, Complex *out=NULL, bool shift=false)
{
out=Setout(in,out);
Execute(in,out,shift);
Normalize(out);
}
void fftNormalized(Complex *in, double *out, bool shift=false) {
out=(double *) Setout(in,(Complex *) out);
Execute(in,(Complex *) out,shift);
Normalize(out);
}
void fftNormalized(double *in, Complex *out, bool shift=false) {
fftNormalized((Complex *) in,out,shift);
}
template<class I, class O>
void fft0Normalized(I in, O out) {
fftNormalized(in,out,true);
}
template<class O>
void Normalize(unsigned int nx, unsigned int M, size_t ostride,
size_t odist, O *out) {
unsigned int stop=nx*ostride;
O *outMdist=out+M*odist;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < stop; i += ostride) {
O *pstop=outMdist+i;
for(O *p=out+i; p < pstop; p += odist) {
*p *= norm;
}
}
}
template<class I, class O>
void fftNormalized(unsigned int nx, unsigned int M, size_t ostride,
size_t odist, I *in, O *out=NULL, bool shift=false) {
out=(O *) Setout((Complex *) in,(Complex *) out);
Execute((Complex *) in,(Complex *) out,shift);
Normalize(nx,M,ostride,odist,out);
}
}; // class fftw
// Blocked matrix transpose of a rows x cols matrix of fixed-size elements,
// implemented with rank-0 FFTW guru plans.  The matrix is partitioned into
// an a x b grid of blocks executed in parallel with OpenMP; plan2 handles
// the remainder blocks when rows or cols is not divisible by the block
// count.
class Transpose {
fftw_plan plan;
fftw_plan plan2; // remainder-block plan (NULL if no remainder)
unsigned int a,b; // block grid dimensions (row blocks x column blocks)
unsigned int nlength,mlength; // doubles per row block / column block
unsigned int ilast,jlast; // indices from which plan2 applies
unsigned int rows,cols;
unsigned int threads;
bool inplace;
unsigned int size; // element size, converted to a multiple of doubles
public:
// Plan a transpose of a rows x cols matrix whose elements are vectors of
// `length` values of type T; sizeof(T) must be a multiple of
// sizeof(double).  If out is NULL the transpose is in place.
template<class T>
Transpose(unsigned int rows, unsigned int cols, unsigned int length,
T *in, T *out=NULL, unsigned int threads=fftw::maxthreads) :
rows(rows), cols(cols), threads(threads) {
size=sizeof(T);
if(size % sizeof(double) != 0) {
std::cerr << "ERROR: Transpose is not implemented for type of size "
<< size;
exit(1);
}
plan=plan2=NULL;
if(rows == 0 || cols == 0) return;
size /= sizeof(double);
length *= size;
if(!out) out=in;
inplace=(out==in);
// In-place transposes use FFTW's own threading; out-of-place ones are
// parallelized here with an OpenMP block grid.
if(inplace) {
fftw::planThreads(threads);
threads=1;
} else fftw::planThreads(1);
fftw_iodim dims[3];
a=std::min(rows,threads);
b=std::min(cols,threads/a);
unsigned int n=utils::ceilquotient(rows,a);
unsigned int m=utils::ceilquotient(cols,b);
// If rows <= threads then a=rows and n=1.
// If rows >= threads then b=1 and m=cols.
nlength=n*length;
mlength=m*length;
dims[0].n=n;
dims[0].is=cols*length;
dims[0].os=length;
dims[1].n=m;
dims[1].is=length;
dims[1].os=rows*length;
dims[2].n=length;
dims[2].is=1;
dims[2].os=1;
// A plan with rank=0 is a transpose.
plan=fftw_plan_guru_r2r(0,NULL,3,dims,(double *) in,(double *) out,
NULL,fftw::effort);
ilast=a;
jlast=b;
// Build a second plan for the smaller trailing block when the matrix
// does not divide evenly into the block grid.
if(n*a > rows) { // Only happens when rows > threads.
a=utils::ceilquotient(rows,n);
ilast=a-1;
dims[0].n=rows-n*ilast;
plan2=fftw_plan_guru_r2r(0,NULL,3,dims,(double *) in,(double *) out,
NULL,fftw::effort);
} else { // Only happens when rows < threads.
if(m*b > cols) {
b=utils::ceilquotient(cols,m);
jlast=b-1;
dims[1].n=cols-m*jlast;
plan2=fftw_plan_guru_r2r(0,NULL,3,dims,(double *) in,(double *) out,
NULL,fftw::effort);
}
}
}
~Transpose() {
if(plan) fftw_destroy_plan(plan);
if(plan2) fftw_destroy_plan(plan2);
}
// Execute the planned transpose; in/out must match the in-place choice
// made at construction.
template<class T>
void transpose(T *in, T *out=NULL) {
if(rows == 0 || cols == 0) return;
if(!out) out=in;
if(inplace ^ (out == in)) {
std::cerr << "ERROR: Transpose " << inout << std::endl;
exit(1);
}
#ifndef FFTWPP_SINGLE_THREAD
if(a > 1) {
if(b > 1) {
// Two-level block grid: nested OpenMP loops over row and column blocks.
int A=a, B=b;
#pragma omp parallel for num_threads(A)
for(unsigned int i=0; i < a; ++i) {
unsigned int I=i*nlength;
#pragma omp parallel for num_threads(B)
for(unsigned int j=0; j < b; ++j) {
unsigned int J=j*mlength;
// Use the remainder plan for the last row/column block.
fftw_execute_r2r((i < ilast && j < jlast) ? plan : plan2,
(double *) in+cols*I+J,
(double *) out+rows*J+I);
}
}
} else {
// Row blocks only.
int A=a;
#pragma omp parallel for num_threads(A)
for(unsigned int i=0; i < a; ++i) {
unsigned int I=i*nlength;
fftw_execute_r2r(i < ilast ? plan : plan2,
(double *) in+cols*I,(double *) out+I);
}
}
} else if(b > 1) {
// Column blocks only.
int B=b;
#pragma omp parallel for num_threads(B)
for(unsigned int j=0; j < b; ++j) {
unsigned int J=j*mlength;
fftw_execute_r2r(j < jlast ? plan : plan2,
(double *) in+J,(double *) out+rows*J);
}
} else
#endif
fftw_execute_r2r(plan,(double *) in,(double*) out);
}
};
// Cache mapping transform geometry keys (type T, ordered by comparator L)
// to measured optimal thread data.
template<class T, class L>
class Threadtable {
public:
  typedef std::map<T,threaddata,L> Table;
  // Return the cached entry for key, or an empty threaddata if absent.
  threaddata Lookup(Table& table, T key) {
    typename Table::iterator it=table.find(key);
    if(it != table.end())
      return it->second;
    return threaddata();
  }
  // Insert or overwrite the entry for key.
  void Store(Table& threadtable, T key, const threaddata& data) {
    threadtable[key]=data;
  }
};
// Thread-table key for 1D transforms: size, thread count, in-place flag.
struct keytype1 {
  unsigned int nx;
  unsigned int threads;
  bool inplace;
  keytype1(unsigned int x, unsigned int t, bool inp) {
    nx=x;
    threads=t;
    inplace=inp;
  }
};
// Strict weak ordering on keytype1: lexicographic over
// (nx, threads, inplace).
struct keyless1 {
  bool operator()(const keytype1& a, const keytype1& b) const {
    if(a.nx != b.nx) return a.nx < b.nx;
    if(a.threads != b.threads) return a.threads < b.threads;
    return a.inplace < b.inplace;
  }
};
// Thread-table key for 2D transforms.
struct keytype2 {
  unsigned int nx;
  unsigned int ny;
  unsigned int threads;
  bool inplace;
  keytype2(unsigned int x, unsigned int y, unsigned int t, bool inp) {
    nx=x;
    ny=y;
    threads=t;
    inplace=inp;
  }
};
// Strict weak ordering on keytype2: lexicographic over
// (nx, ny, threads, inplace).
struct keyless2 {
  bool operator()(const keytype2& a, const keytype2& b) const {
    if(a.nx != b.nx) return a.nx < b.nx;
    if(a.ny != b.ny) return a.ny < b.ny;
    if(a.threads != b.threads) return a.threads < b.threads;
    return a.inplace < b.inplace;
  }
};
// Thread-table key for 3D (or multi-vector) transforms.
struct keytype3 {
  unsigned int nx;
  unsigned int ny;
  unsigned int nz;
  unsigned int threads;
  bool inplace;
  keytype3(unsigned int x, unsigned int y, unsigned int z,
           unsigned int t, bool inp) {
    nx=x;
    ny=y;
    nz=z;
    threads=t;
    inplace=inp;
  }
};
// Strict weak ordering on keytype3: lexicographic over
// (nx, ny, nz, threads, inplace).
struct keyless3 {
  bool operator()(const keytype3& a, const keytype3& b) const {
    if(a.nx != b.nx) return a.nx < b.nx;
    if(a.ny != b.ny) return a.ny < b.ny;
    if(a.nz != b.nz) return a.nz < b.nz;
    if(a.threads != b.threads) return a.threads < b.threads;
    return a.inplace < b.inplace;
  }
};
// Compute the complex Fourier transform of n complex values.
// Before calling fft(), the arrays in and out (which may coincide) must be
// allocated as Complex[n].
//
// Out-of-place usage:
//
// fft1d Forward(n,-1,in,out);
// Forward.fft(in,out);
//
// fft1d Backward(n,1,in,out);
// Backward.fft(in,out);
//
// fft1d Backward(n,1,in,out);
// Backward.fftNormalized(in,out); // True inverse of Forward.fft(out,in);
//
// In-place usage:
//
// fft1d Forward(n,-1);
// Forward.fft(in);
//
// fft1d Backward(n,1);
// Backward.fft(in);
//
// 1D complex-to-complex transform of nx values; see the usage comment
// above.  Thread counts are cached per (nx,threads,inplace) geometry.
class fft1d : public fftw, public Threadtable<keytype1,keyless1> {
unsigned int nx;
static Table threadtable;
public:
// sign is the phase sign (-1 forward, +1 backward); in/out may be NULL
// (planning buffer allocated) or coincide (in-place transform).
fft1d(unsigned int nx, int sign, Complex *in=NULL, Complex *out=NULL,
unsigned int threads=maxthreads)
: fftw(2*nx,sign,threads), nx(nx) {Setup(in,out);}
#ifdef __Array_h__
// Convenience constructor taking Array-library 1D arrays.
fft1d(int sign, const Array::array1<Complex>& in,
const Array::array1<Complex>& out=Array::NULL1,
unsigned int threads=maxthreads)
: fftw(2*in.Nx(),sign,threads), nx(in.Nx()) {Setup(in,out);}
#endif
threaddata lookup(bool inplace, unsigned int threads) {
return this->Lookup(threadtable,keytype1(nx,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
this->Store(threadtable,keytype1(nx,data.threads,inplace),data);
}
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_1d(nx,(fftw_complex *) in,(fftw_complex *) out,
sign,effort);
}
};
// Common machinery for multi-vector (batched) 1D transforms with input
// type I and output type O (fftw_complex or double).  M transforms are
// distributed over T OpenMP threads, Q per thread with a remainder R;
// plan computes Q transforms and plan2 computes Q+1 (for the first R
// threads).  The constructor benchmarks FFTW's internal threading
// against this explicit OpenMP distribution and keeps the faster setup.
template<class I, class O>
class fftwblock : public virtual fftw {
public:
int nx; // transform length (int as required by fftw_plan_many_*)
unsigned int M; // number of vectors
size_t istride,ostride;
size_t idist,odist;
fftw_plan plan1,plan2;
unsigned int T,Q,R; // threads, transforms per thread, remainder
fftwblock(unsigned int nx, unsigned int M,
size_t istride, size_t ostride, size_t idist, size_t odist,
Complex *in, Complex *out, unsigned int Threads)
: fftw(), nx(nx), M(M), istride(istride), ostride(ostride),
idist(Dist(nx,istride,idist)), odist(Dist(nx,ostride,odist)),
plan1(NULL), plan2(NULL) {
T=1;
Q=M;
R=0;
// First candidate: all M transforms in one plan (FFTW threading).
threaddata S1=Setup(in,out);
fftw_plan planT1=plan;
if(fftw::maxthreads > 1) {
if(Threads > 1) {
// Second candidate: distribute the M transforms over T threads.
T=std::min(M,Threads);
Q=T > 0 ? M/T : 0;
R=M-Q*T;
threads=Threads;
threaddata ST=Setup(in,out);
if(R > 0 && threads == 1 && plan1 != plan2) {
fftw_destroy_plan(plan2);
plan2=plan1;
}
if(ST.mean > S1.mean-S1.stdev) { // Use FFTW's multi-threading
fftw_destroy_plan(plan);
if(R > 0) {
fftw_destroy_plan(plan2);
plan2=NULL;
}
T=1;
Q=M;
R=0;
plan=planT1;
threads=S1.threads;
} else { // Do the multi-threading ourselves
fftw_destroy_plan(planT1);
threads=ST.threads;
}
} else
Setup(in,out); // Synchronize wisdom
}
}
// Plan Q batched transforms for each input/output type combination.
fftw_plan Plan(int Q, fftw_complex *in, fftw_complex *out) {
return fftw_plan_many_dft(1,&nx,Q,in,NULL,istride,idist,
out,NULL,ostride,odist,sign,effort);
}
fftw_plan Plan(int Q, double *in, fftw_complex *out) {
return fftw_plan_many_dft_r2c(1,&nx,Q,in,NULL,istride,idist,
out,NULL,ostride,odist,effort);
}
fftw_plan Plan(int Q, fftw_complex *in, double *out) {
return fftw_plan_many_dft_c2r(1,&nx,Q,in,NULL,istride,idist,
out,NULL,ostride,odist,effort);
}
// Build the per-thread plans: plan2 covers Q+1 transforms for remainder
// threads; the returned plan covers Q transforms.
fftw_plan Plan(Complex *in, Complex *out) {
if(R > 0) {
plan2=Plan(Q+1,(I *) in,(O *) out);
if(!plan2) return NULL;
if(threads == 1) plan1=plan2;
}
return Plan(Q,(I *) in,(O *) out);
}
void Execute(fftw_plan plan, fftw_complex *in, fftw_complex *out) {
fftw_execute_dft(plan,in,out);
}
void Execute(fftw_plan plan, double *in, fftw_complex *out) {
fftw_execute_dft_r2c(plan,in,out);
}
void Execute(fftw_plan plan, fftw_complex *in, double *out) {
fftw_execute_dft_c2r(plan,in,out);
}
// Run either the single combined plan or the explicit OpenMP
// distribution: the first T-R threads run Q transforms (plan), the
// remaining R threads run Q+1 (plan2).
void Execute(Complex *in, Complex *out, bool=false) {
if(T == 1)
Execute(plan,(I *) in,(O *) out);
else {
unsigned int extra=T-R;
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(T)
#endif
for(unsigned int i=0; i < T; ++i) {
unsigned int iQ=i*Q;
if(i < extra)
Execute(plan,(I *) in+iQ*idist,(O *) out+iQ*odist);
else {
unsigned int offset=iQ+i-extra;
Execute(plan2,(I *) in+offset*idist,(O *) out+offset*odist);
}
}
}
}
unsigned int Threads() {return std::max(T,threads);}
~fftwblock() {
if(plan2) fftw_destroy_plan(plan2);
}
};
// Compute the complex Fourier transform of M complex vectors, each of
// length n.
// Before calling fft(), the arrays in and out (which may coincide) must be
// allocated as Complex[M*n].
//
// Out-of-place usage:
//
// mfft1d Forward(n,-1,M,stride,dist,in,out);
// Forward.fft(in,out);
//
// In-place usage:
//
// mfft1d Forward(n,-1,M,stride,dist);
// Forward.fft(in);
//
// Notes:
// stride is the spacing between the elements of each Complex vector;
// dist is the spacing between the first elements of the vectors.
//
//
// Multiple 1D complex-to-complex transforms; see the usage comment above.
// Thread counts are cached per (nx,Q,R,threads,inplace) geometry.
class mfft1d : public fftwblock<fftw_complex,fftw_complex>,
public Threadtable<keytype3,keyless3> {
static Table threadtable;
public:
// Symmetric strides/distances for input and output.  The fftw base is
// sized to cover the last element of the last vector.
mfft1d(unsigned int nx, int sign, unsigned int M=1, size_t stride=1,
size_t dist=0, Complex *in=NULL, Complex *out=NULL,
unsigned int threads=maxthreads) :
fftw(2*((nx-1)*stride+(M-1)*Dist(nx,stride,dist)+1),sign,threads,nx),
fftwblock<fftw_complex,fftw_complex>
(nx,M,stride,stride,dist,dist,in,out,threads) {}
// Independent input and output strides/distances.
mfft1d(unsigned int nx, int sign, unsigned int M,
size_t istride, size_t ostride, size_t idist, size_t odist,
Complex *in=NULL, Complex *out=NULL, unsigned int threads=maxthreads):
fftw(std::max(2*((nx-1)*istride+(M-1)*Dist(nx,istride,idist)+1),
2*((nx-1)*ostride+(M-1)*Dist(nx,ostride,odist)+1)),sign,
threads, nx),
fftwblock<fftw_complex,fftw_complex>(nx,M,istride,ostride,idist,odist,in,
out,threads) {}
threaddata lookup(bool inplace, unsigned int threads) {
return Lookup(threadtable,keytype3(nx,Q,R,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
Store(threadtable,keytype3(nx,Q,R,data.threads,inplace),data);
}
};
// Compute the complex Fourier transform of n real values, using phase sign -1.
// Before calling fft(), the array in must be allocated as double[n] and
// the array out must be allocated as Complex[n/2+1]. The arrays in and out
// may coincide, allocated as Complex[n/2+1].
//
// Out-of-place usage:
//
// rcfft1d Forward(n,in,out);
// Forward.fft(in,out);
//
// In-place usage:
//
// rcfft1d Forward(n);
// Forward.fft(out);
//
// Notes:
// in contains the n real values stored as a Complex array;
// out contains the first n/2+1 Complex Fourier values.
//
// 1D real-to-complex transform (phase sign -1); see the usage comment
// above.  Thread counts are cached per (nx,threads,inplace) geometry.
class rcfft1d : public fftw, public Threadtable<keytype1,keyless1> {
unsigned int nx;
static Table threadtable;
public:
// In-place constructor: out holds nx real values padded to nx/2+1
// Complex values.
rcfft1d(unsigned int nx, Complex *out=NULL, unsigned int threads=maxthreads)
: fftw(2*(nx/2+1),-1,threads,nx), nx(nx) {Setup(out,(double*) NULL);}
// Out-of-place constructor: in is double[nx], out is Complex[nx/2+1].
rcfft1d(unsigned int nx, double *in, Complex *out=NULL,
unsigned int threads=maxthreads)
: fftw(2*(nx/2+1),-1,threads,nx), nx(nx) {Setup(in,out);}
threaddata lookup(bool inplace, unsigned int threads) {
return Lookup(threadtable,keytype1(nx,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
Store(threadtable,keytype1(nx,data.threads,inplace),data);
}
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_r2c_1d(nx,(double *) in,(fftw_complex *) out, effort);
}
void Execute(Complex *in, Complex *out, bool=false) {
fftw_execute_dft_r2c(plan,(double *) in,(fftw_complex *) out);
}
};
// Compute the real inverse Fourier transform of the n/2+1 Complex values
// corresponding to the non-negative part of the frequency spectrum, using
// phase sign +1.
// Before calling fft(), the array in must be allocated as Complex[n/2+1]
// and the array out must be allocated as double[n]. The arrays in and out
// may coincide, allocated as Complex[n/2+1].
//
// Out-of-place usage (input destroyed):
//
// crfft1d Backward(n,in,out);
// Backward.fft(in,out);
//
// In-place usage:
//
// crfft1d Backward(n);
// Backward.fft(in);
//
// Notes:
// in contains the first n/2+1 Complex Fourier values.
// out contains the n real values stored as a Complex array;
//
// 1D complex-to-real inverse transform (phase sign +1); see the usage
// comment above.  Thread counts are cached per (nx,threads,inplace).
class crfft1d : public fftw, public Threadtable<keytype1,keyless1> {
unsigned int nx;
static Table threadtable;
public:
// In-place constructor: data allocated as Complex[nx/2+1].
crfft1d(unsigned int nx, double *out=NULL, unsigned int threads=maxthreads)
: fftw(2*(nx/2+1),1,threads,nx), nx(nx) {Setup(out);}
// Out-of-place constructor: in is Complex[nx/2+1], out is double[nx];
// realsize accounts for in-place padding when in and out coincide.
crfft1d(unsigned int nx, Complex *in, double *out=NULL,
unsigned int threads=maxthreads)
: fftw(realsize(nx,in,out),1,threads,nx), nx(nx) {Setup(in,out);}
threaddata lookup(bool inplace, unsigned int threads) {
return Lookup(threadtable,keytype1(nx,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
Store(threadtable,keytype1(nx,data.threads,inplace),data);
}
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_c2r_1d(nx,(fftw_complex *) in,(double *) out,effort);
}
void Execute(Complex *in, Complex *out, bool=false) {
fftw_execute_dft_c2r(plan,(fftw_complex *) in,(double *) out);
}
};
// Compute the real Fourier transform of M real vectors, each of length n,
// using phase sign -1. Before calling fft(), the array in must be
// allocated as double[M*n] and the array out must be allocated as
// Complex[M*(n/2+1)]. The arrays in and out may coincide,
// allocated as Complex[M*(n/2+1)].
//
// Out-of-place usage:
//
// mrcfft1d Forward(n,M,istride,ostride,idist,odist,in,out);
// Forward.fft(in,out);
//
// In-place usage:
//
// mrcfft1d Forward(n,M,istride,ostride,idist,odist);
// Forward.fft(out);
//
// Notes:
// istride is the spacing between the elements of each real vector;
// ostride is the spacing between the elements of each Complex vector;
// idist is the spacing between the first elements of the real vectors;
// odist is the spacing between the first elements of the Complex vectors;
// in contains the n real values stored as a Complex array;
// out contains the first n/2+1 Complex Fourier values.
//
// Multiple 1D real-to-complex transforms (phase sign -1); see the usage
// comment above.  Thread counts are cached per (nx,Q,R,threads,inplace).
class mrcfft1d : public fftwblock<double,fftw_complex>,
public Threadtable<keytype3,keyless3> {
static Table threadtable;
public:
// The fftw base size is the larger of the real-input and complex-output
// footprints, in doubles.
mrcfft1d(unsigned int nx, unsigned int M,
size_t istride, size_t ostride,
size_t idist, size_t odist,
double *in=NULL, Complex *out=NULL,
unsigned int threads=maxthreads)
: fftw(std::max((realsize(nx,in,out)-2)*istride+(M-1)*idist+2,
2*(nx/2*ostride+(M-1)*odist+1)),-1,threads,nx),
fftwblock<double,fftw_complex>
(nx,M,istride,ostride,idist,odist,(Complex *) in,out,threads) {}
threaddata lookup(bool inplace, unsigned int threads) {
return Lookup(threadtable,keytype3(nx,Q,R,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
Store(threadtable,keytype3(nx,Q,R,data.threads,inplace),data);
}
// Normalize the nx/2+1 Complex outputs of each of the M vectors.
void Normalize(Complex *out) {
fftw::Normalize<Complex>(nx/2+1,M,ostride,odist,out);
}
void fftNormalized(double *in, Complex *out=NULL) {
fftw::fftNormalized<double,Complex>(nx/2+1,M,ostride,odist,in,out,false);
}
// Shifted normalized transform.
void fft0Normalized(double *in, Complex *out=NULL) {
fftw::fftNormalized<double,Complex>(nx/2+1,M,ostride,odist,in,out,true);
}
};
// Compute the real inverse Fourier transform of M complex vectors, each of
// length n/2+1, corresponding to the non-negative parts of the frequency
// spectra, using phase sign +1. Before calling fft(), the array in must be
// allocated as Complex[M*(n/2+1)] and the array out must be allocated as
// double[M*n]. The arrays in and out may coincide,
// allocated as Complex[M*(n/2+1)].
//
// Out-of-place usage (input destroyed):
//
// mcrfft1d Backward(n,M,istride,ostride,idist,odist,in,out);
// Backward.fft(in,out);
//
// In-place usage:
//
// mcrfft1d Backward(n,M,istride,ostride,idist,odist);
// Backward.fft(out);
//
// Notes:
// istride/ostride are the spacings between the elements of each Complex
// and real vector, respectively; idist/odist are the spacings between
// the first elements of successive Complex and real vectors;
// in contains the first n/2+1 Complex Fourier values;
// out contains the n real values stored as a Complex array.
//
// Multiple 1D complex-to-real inverse transforms (phase sign +1); see the
// usage comment above.  Thread counts are cached per (nx,Q,R,threads,
// inplace).
class mcrfft1d : public fftwblock<fftw_complex,double>,
public Threadtable<keytype3,keyless3> {
static Table threadtable;
public:
// The fftw base size is the larger of the complex-input and real-output
// footprints, in doubles.
mcrfft1d(unsigned int nx, unsigned int M, size_t istride, size_t ostride,
size_t idist, size_t odist, Complex *in=NULL, double *out=NULL,
unsigned int threads=maxthreads)
: fftw(std::max(2*(nx/2*istride+(M-1)*idist+1),
(realsize(nx,in,out)-2)*ostride+(M-1)*odist+2),1,threads,nx),
fftwblock<fftw_complex,double>
(nx,M,istride,ostride,idist,odist,in,(Complex *) out,threads) {}
threaddata lookup(bool inplace, unsigned int threads) {
return Lookup(threadtable,keytype3(nx,Q,R,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
Store(threadtable,keytype3(nx,Q,R,data.threads,inplace),data);
}
// Normalize the nx real outputs of each of the M vectors.
void Normalize(double *out) {
fftw::Normalize<double>(nx,M,ostride,odist,out);
}
void fftNormalized(Complex *in, double *out=NULL) {
fftw::fftNormalized<Complex,double>(nx,M,ostride,odist,in,out,false);
}
// Shifted normalized transform.
void fft0Normalized(Complex *in, double *out=NULL) {
fftw::fftNormalized<Complex,double>(nx,M,ostride,odist,in,out,true);
}
};
// Compute the complex two-dimensional Fourier transform of nx times ny
// complex values. Before calling fft(), the arrays in and out (which may
// coincide) must be allocated as Complex[nx*ny].
//
// Out-of-place usage:
//
// fft2d Forward(nx,ny,-1,in,out);
// Forward.fft(in,out);
//
// fft2d Backward(nx,ny,1,in,out);
// Backward.fft(in,out);
//
// fft2d Backward(nx,ny,1,in,out);
// Backward.fftNormalized(in,out); // True inverse of Forward.fft(out,in);
//
// In-place usage:
//
// fft2d Forward(nx,ny,-1);
// Forward.fft(in);
//
// fft2d Backward(nx,ny,1);
// Backward.fft(in);
//
// Note:
// in[ny*i+j] contains the ny Complex values for each i=0,...,nx-1.
//
// 2D complex-to-complex transform of nx x ny values; see the usage
// comment above.  Thread counts are cached per (nx,ny,threads,inplace).
class fft2d : public fftw, public Threadtable<keytype2,keyless2> {
unsigned int nx;
unsigned int ny;
static Table threadtable;
public:
// sign is the phase sign; in/out may be NULL or coincide.
fft2d(unsigned int nx, unsigned int ny, int sign, Complex *in=NULL,
Complex *out=NULL, unsigned int threads=maxthreads)
: fftw(2*nx*ny,sign,threads), nx(nx), ny(ny) {Setup(in,out);}
#ifdef __Array_h__
// Convenience constructor taking Array-library 2D arrays.
fft2d(int sign, const Array::array2<Complex>& in,
const Array::array2<Complex>& out=Array::NULL2,
unsigned int threads=maxthreads)
: fftw(2*in.Size(),sign,threads), nx(in.Nx()), ny(in.Ny()) {
Setup(in,out);
}
#endif
threaddata lookup(bool inplace, unsigned int threads) {
return this->Lookup(threadtable,keytype2(nx,ny,threads,inplace));
}
void store(bool inplace, const threaddata& data) {
this->Store(threadtable,keytype2(nx,ny,data.threads,inplace),data);
}
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_2d(nx,ny,(fftw_complex *) in,(fftw_complex *) out,
sign,effort);
}
void Execute(Complex *in, Complex *out, bool=false) {
fftw_execute_dft(plan,(fftw_complex *) in,(fftw_complex *) out);
}
};
// Compute the complex two-dimensional Fourier transform of nx times ny real
// values, using phase sign -1.
// Before calling fft(), the array in must be allocated as double[nx*ny] and
// the array out must be allocated as Complex[nx*(ny/2+1)]. The arrays in
// and out may coincide, allocated as Complex[nx*(ny/2+1)].
//
// Out-of-place usage:
//
// rcfft2d Forward(nx,ny,in,out);
// Forward.fft(in,out); // Origin of Fourier domain at (0,0)
// Forward.fft0(in,out); // Origin of Fourier domain at (nx/2,0);
// input destroyed.
//
// In-place usage:
//
// rcfft2d Forward(nx,ny);
// Forward.fft(in); // Origin of Fourier domain at (0,0)
// Forward.fft0(in); // Origin of Fourier domain at (nx/2,0)
//
// Notes:
// in contains the nx*ny real values stored as a Complex array;
// out contains the upper-half portion (ky >= 0) of the Complex transform.
//
// 2D real-to-complex transform of nx x ny real values (phase sign -1);
// see the usage comment above.
class rcfft2d : public fftw {
  unsigned int nx;
  unsigned int ny;
public:
  // In-place constructor: data allocated as Complex[nx*(ny/2+1)].
  rcfft2d(unsigned int nx, unsigned int ny, Complex *out=NULL,
          unsigned int threads=maxthreads)
    : fftw(2*nx*(ny/2+1),-1,threads,nx*ny), nx(nx), ny(ny) {Setup(out);}
  // Out-of-place constructor: in is double[nx*ny], out is
  // Complex[nx*(ny/2+1)].
  rcfft2d(unsigned int nx, unsigned int ny, double *in, Complex *out=NULL,
          unsigned int threads=maxthreads)
    : fftw(2*nx*(ny/2+1),-1,threads,nx*ny), nx(nx), ny(ny) {
    Setup(in,out);
  }
  fftw_plan Plan(Complex *in, Complex *out) {
    return fftw_plan_dft_r2c_2d(nx,ny,(double *) in,(fftw_complex *) out,
                                effort);
  }
  // Optionally pre-shift the Fourier origin to (nx/2,0), then execute.
  void Execute(Complex *in, Complex *out, bool shift=false) {
    if(shift) {
      if(inplace) Shift(in,nx,ny,threads);
      else Shift((double *) in,nx,ny,threads);
    }
    fftw_execute_dft_r2c(plan,(double *) in,(fftw_complex *) out);
  }
  // Set Nyquist modes of even shifted transforms to zero.
  // Fix: braces added around the conditional bodies so the OpenMP
  // directive is enclosed in a compound statement rather than being the
  // sole (braceless) statement of the if, matching the 3D version and
  // avoiding compiler-dependent pragma placement.
  void deNyquist(Complex *f) {
    unsigned int nyp=ny/2+1;
    if(nx % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int j=0; j < nyp; ++j)
        f[j]=0.0;
    }
    if(ny % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < nx; ++i)
        f[(i+1)*nyp-1]=0.0;
    }
  }
};
// Compute the real two-dimensional inverse Fourier transform of the
// nx*(ny/2+1) Complex values corresponding to the spectral values in the
// half-plane ky >= 0, using phase sign +1.
// Before calling fft(), the array in must be allocated as
// Complex[nx*(ny/2+1)] and the array out must be allocated as
// double[nx*ny]. The arrays in and out may coincide,
// allocated as Complex[nx*(ny/2+1)].
//
// Out-of-place usage (input destroyed):
//
// crfft2d Backward(nx,ny,in,out);
// Backward.fft(in,out); // Origin of Fourier domain at (0,0)
// Backward.fft0(in,out); // Origin of Fourier domain at (nx/2,0)
//
// In-place usage:
//
// crfft2d Backward(nx,ny);
// Backward.fft(in); // Origin of Fourier domain at (0,0)
// Backward.fft0(in); // Origin of Fourier domain at (nx/2,0)
//
// Notes:
// in contains the upper-half portion (ky >= 0) of the Complex transform;
// out contains the nx*ny real values stored as a Complex array.
//
// 2D complex-to-real inverse transform (phase sign +1); see the usage
// comment above.
class crfft2d : public fftw {
  unsigned int nx;
  unsigned int ny;
public:
  // In-place constructor: data allocated as Complex[nx*(ny/2+1)].
  crfft2d(unsigned int nx, unsigned int ny, double *out=NULL,
          unsigned int threads=maxthreads) :
    fftw(2*nx*(ny/2+1),1,threads,nx*ny), nx(nx), ny(ny) {Setup(out);}
  // Out-of-place constructor: in is Complex[nx*(ny/2+1)], out is
  // double[nx*ny]; realsize accounts for in-place padding.
  crfft2d(unsigned int nx, unsigned int ny, Complex *in, double *out=NULL,
          unsigned int threads=maxthreads)
    : fftw(nx*realsize(ny,in,out),1,threads,nx*ny), nx(nx), ny(ny) {
    Setup(in,out);
  }
  fftw_plan Plan(Complex *in, Complex *out) {
    return fftw_plan_dft_c2r_2d(nx,ny,(fftw_complex *) in,(double *) out,
                                effort);
  }
  // Execute, then optionally post-shift the Fourier origin to (nx/2,0).
  void Execute(Complex *in, Complex *out, bool shift=false) {
    fftw_execute_dft_c2r(plan,(fftw_complex *) in,(double *) out);
    if(shift) {
      if(inplace) Shift(out,nx,ny,threads);
      else Shift((double *) out,nx,ny,threads);
    }
  }
  // Set Nyquist modes of even shifted transforms to zero.
  // Fix: braces added around the conditional bodies so the OpenMP
  // directive is enclosed in a compound statement rather than being the
  // sole (braceless) statement of the if, matching the 3D version and
  // avoiding compiler-dependent pragma placement.
  void deNyquist(Complex *f) {
    unsigned int nyp=ny/2+1;
    if(nx % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int j=0; j < nyp; ++j)
        f[j]=0.0;
    }
    if(ny % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
      for(unsigned int i=0; i < nx; ++i)
        f[(i+1)*nyp-1]=0.0;
    }
  }
};
// Compute the complex three-dimensional Fourier transform of
// nx times ny times nz complex values. Before calling fft(), the arrays in
// and out (which may coincide) must be allocated as Complex[nx*ny*nz].
//
// Out-of-place usage:
//
// fft3d Forward(nx,ny,nz,-1,in,out);
// Forward.fft(in,out);
//
// fft3d Backward(nx,ny,nz,1,in,out);
// Backward.fft(in,out);
//
// fft3d Backward(nx,ny,nz,1,in,out);
// Backward.fftNormalized(in,out); // True inverse of Forward.fft(out,in);
//
// In-place usage:
//
// fft3d Forward(nx,ny,nz,-1);
// Forward.fft(in);
//
// fft3d Backward(nx,ny,nz,1);
// Backward.fft(in);
//
// Note:
// in[nz*(ny*i+j)+k] contains the (i,j,k)th Complex value,
// indexed by i=0,...,nx-1, j=0,...,ny-1, and k=0,...,nz-1.
//
// 3D complex-to-complex transform of nx x ny x nz values; see the usage
// comment above.
class fft3d : public fftw {
unsigned int nx;
unsigned int ny;
unsigned int nz;
public:
// sign is the phase sign; in/out may be NULL or coincide.
fft3d(unsigned int nx, unsigned int ny, unsigned int nz,
int sign, Complex *in=NULL, Complex *out=NULL,
unsigned int threads=maxthreads)
: fftw(2*nx*ny*nz,sign,threads), nx(nx), ny(ny), nz(nz) {Setup(in,out);}
#ifdef __Array_h__
// Convenience constructor taking Array-library 3D arrays.
fft3d(int sign, const Array::array3<Complex>& in,
const Array::array3<Complex>& out=Array::NULL3,
unsigned int threads=maxthreads)
: fftw(2*in.Size(),sign,threads), nx(in.Nx()), ny(in.Ny()), nz(in.Nz())
{Setup(in,out);}
#endif
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_3d(nx,ny,nz,(fftw_complex *) in,
(fftw_complex *) out, sign, effort);
}
};
// Compute the complex three-dimensional Fourier transform of
// nx times ny times nz real values, using phase sign -1.
// Before calling fft(), the array in must be allocated as double[nx*ny*nz]
// and the array out must be allocated as Complex[nx*ny*(nz/2+1)]. The
// arrays in and out may coincide, allocated as Complex[nx*ny*(nz/2+1)].
//
// Out-of-place usage:
//
// rcfft3d Forward(nx,ny,nz,in,out);
// Forward.fft(in,out); // Origin of Fourier domain at (0,0)
// Forward.fft0(in,out); // Origin of Fourier domain at (nx/2,ny/2,0);
// input destroyed
// In-place usage:
//
// rcfft3d Forward(nx,ny,nz);
// Forward.fft(in); // Origin of Fourier domain at (0,0)
// Forward.fft0(in); // Origin of Fourier domain at (nx/2,ny/2,0)
//
// Notes:
// in contains the nx*ny*nz real values stored as a Complex array;
// out contains the upper-half portion (kz >= 0) of the Complex transform.
//
// Real-to-complex 3D FFT; output holds the kz >= 0 half-spectrum.
class rcfft3d : public fftw {
unsigned int nx;  // real transform size in x
unsigned int ny;  // real transform size in y
unsigned int nz;  // real transform size in z
public:
// In-place constructor: out doubles as the (real) input buffer.
// Buffer length is 2*nx*ny*(nz/2+1) doubles; nx*ny*nz is the real count.
rcfft3d(unsigned int nx, unsigned int ny, unsigned int nz, Complex *out=NULL,
unsigned int threads=maxthreads)
: fftw(2*nx*ny*(nz/2+1),-1,threads,nx*ny*nz), nx(nx), ny(ny), nz(nz) {
Setup(out);
}
// Out-of-place constructor: real input in, complex output out.
rcfft3d(unsigned int nx, unsigned int ny, unsigned int nz, double *in,
Complex *out=NULL, unsigned int threads=maxthreads)
: fftw(2*nx*ny*(nz/2+1),-1,threads,nx*ny*nz),
nx(nx), ny(ny), nz(nz) {Setup(in,out);}
// Build the FFTW real-to-complex plan (invoked by the fftw base class).
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_r2c_3d(nx,ny,nz,(double *) in,(fftw_complex *) out,
effort);
}
// Execute the transform. If shift is true, the real input is pre-shifted so
// the Fourier-domain origin lands at (nx/2,ny/2,0); shift happens BEFORE the
// r2c transform (contrast with crfft3d, which shifts after).
void Execute(Complex *in, Complex *out, bool shift=false) {
if(shift) {
if(inplace) Shift(in,nx,ny,nz,threads);
else Shift((double *) in,nx,ny,nz,threads);
}
fftw_execute_dft_r2c(plan,(double *) in,(fftw_complex *) out);
}
// Set Nyquist modes of even shifted transforms to zero.
void deNyquist(Complex *f) {
unsigned int nzp=nz/2+1;  // complex modes stored along z
unsigned int yz=ny*nzp;   // stride of one x-slab
// x even: zero the first x-slab (f[0..yz-1]).
if(nx % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int k=0; k < yz; ++k)
f[k]=0.0;
}
// y even: zero the first y-row of every x-slab.
if(ny % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < nx; ++i) {
unsigned int iyz=i*yz;
for(unsigned int k=0; k < nzp; ++k)
f[iyz+k]=0.0;
}
}
// z even: zero the last stored z-mode of every (x,y) line.
// NOTE: the unbraced if binds to the loop nest below, as does the pragma.
if(nz % 2 == 0)
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < nx; ++i)
for(unsigned int j=0; j < ny; ++j)
f[i*yz+(j+1)*nzp-1]=0.0;
}
};
// Compute the real three-dimensional inverse Fourier transform of the
// nx*ny*(nz/2+1) Complex values corresponding to the spectral values in the
// half-plane kz >= 0, using phase sign +1.
// Before calling fft(), the array in must be allocated as
// Complex[nx*ny*(nz/2+1)] and the array out must be allocated as
// double[nx*ny*nz]. The arrays in and out may coincide,
// allocated as Complex[nx*ny*(nz/2+1)].
//
// Out-of-place usage (input destroyed):
//
// crfft3d Backward(nx,ny,nz,in,out);
// Backward.fft(in,out); // Origin of Fourier domain at (0,0)
// Backward.fft0(in,out); // Origin of Fourier domain at (nx/2,ny/2,0)
//
// In-place usage:
//
// crfft3d Backward(nx,ny,nz);
// Backward.fft(in); // Origin of Fourier domain at (0,0)
// Backward.fft0(in); // Origin of Fourier domain at (nx/2,ny/2,0)
//
// Notes:
// in contains the upper-half portion (kz >= 0) of the Complex transform;
// out contains the nx*ny*nz real values stored as a Complex array.
//
// Complex-to-real inverse 3D FFT; input is the kz >= 0 half-spectrum.
class crfft3d : public fftw {
unsigned int nx;  // real transform size in x
unsigned int ny;  // real transform size in y
unsigned int nz;  // real transform size in z
public:
// In-place constructor: out doubles as the (complex) input buffer.
crfft3d(unsigned int nx, unsigned int ny, unsigned int nz, double *out=NULL,
unsigned int threads=maxthreads)
: fftw(2*nx*ny*(nz/2+1),1,threads,nx*ny*nz), nx(nx), ny(ny), nz(nz)
{Setup(out);}
// Out-of-place constructor: complex input in, real output out.
// realsize() picks the buffer length appropriate for in/out aliasing.
crfft3d(unsigned int nx, unsigned int ny, unsigned int nz, Complex *in,
double *out=NULL, unsigned int threads=maxthreads)
: fftw(nx*ny*(realsize(nz,in,out)),1,threads,nx*ny*nz), nx(nx), ny(ny),
nz(nz) {Setup(in,out);}
// Build the FFTW complex-to-real plan (invoked by the fftw base class).
fftw_plan Plan(Complex *in, Complex *out) {
return fftw_plan_dft_c2r_3d(nx,ny,nz,(fftw_complex *) in,(double *) out,
effort);
}
// Execute the transform. If shift is true, the real output is post-shifted
// so the Fourier-domain origin maps to (nx/2,ny/2,0); shift happens AFTER
// the c2r transform (contrast with rcfft3d, which shifts before).
void Execute(Complex *in, Complex *out, bool shift=false) {
fftw_execute_dft_c2r(plan,(fftw_complex *) in,(double *) out);
if(shift) {
if(inplace) Shift(out,nx,ny,nz,threads);
else Shift((double *) out,nx,ny,nz,threads);
}
}
// Set Nyquist modes of even shifted transforms to zero.
void deNyquist(Complex *f) {
unsigned int nzp=nz/2+1;  // complex modes stored along z
unsigned int yz=ny*nzp;   // stride of one x-slab
// x even: zero the first x-slab (f[0..yz-1]).
if(nx % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int k=0; k < yz; ++k)
f[k]=0.0;
}
// y even: zero the first y-row of every x-slab.
if(ny % 2 == 0) {
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < nx; ++i) {
unsigned int iyz=i*yz;
for(unsigned int k=0; k < nzp; ++k)
f[iyz+k]=0.0;
}
}
// z even: zero the last stored z-mode of every (x,y) line.
// NOTE: the unbraced if binds to the loop nest below, as does the pragma.
if(nz % 2 == 0)
#ifndef FFTWPP_SINGLE_THREAD
#pragma omp parallel for num_threads(threads)
#endif
for(unsigned int i=0; i < nx; ++i)
for(unsigned int j=0; j < ny; ++j)
f[i*yz+(j+1)*nzp-1]=0.0;
}
};
}
#endif
|
c55c7aec73df0f31d67fbe39510946453b899e1d.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
/* Runtime descriptor for a multidimensional array passed in from the host
   framework (Devito-style). Only `data` and `size` are read by Forward();
   the remaining fields presumably describe padding/halo geometry — confirm
   against the generating framework before relying on them. */
struct dataobj
{
  void *restrict data; /* flat buffer; cast to a typed VLA pointer on use */
  int * size;          /* per-dimension extents (including halo/padding) */
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;
/* Accumulated wall-clock seconds per generated code section:
   section0 = stencil update, section1 = source injection,
   section2 = receiver sampling (see Forward()). */
struct profiler
{
  double section0;
  double section1;
  double section2;
} ;
/* Generated forward propagator for the 3D acoustic wave equation with a
 * damping (sponge) layer, offloaded to an accelerator via OpenMP target
 * directives.
 *
 * Per time step it runs three sections:
 *   section0: 2nd-order-in-time wavefield update with a wide symmetric
 *             spatial stencil (offsets +-1..+-6 around a 12-point halo);
 *   section1: injects the source wavelet into u via trilinear interpolation
 *             over the 8 grid points surrounding each source position;
 *   section2: samples u at each receiver position (trilinear interpolation)
 *             into rec.
 * The wavefield u keeps three time buffers cycled through t0/t1/t2.
 * Coordinate-to-index factors 5.0e-2 and 2.0e+1 encode a grid spacing of 20.
 * timers accumulates wall-clock seconds per section (NOTE(review): target
 * regions appear synchronous here, so the host timers cover device work).
 * Returns 0. */
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
  /* Reinterpret the flat dataobj buffers as variably-sized array views. */
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
  float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
  float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
  float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
  float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
  /* Copy all inputs to the device once, before the time loop. */
#pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  /* Time loop: t0 = current, t1 = next, t2 = previous buffer (mod 3). */
  for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
  {
    struct timeval start_section0, end_section0;
    gettimeofday(&start_section0, NULL);
    /* Begin section0: stencil update of u[t1] from u[t0], u[t2], vp, damp. */
#pragma omp target teams distribute parallel for collapse(3)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int y = y_m; y <= y_M; y += 1)
      {
        for (int z = z_m; z <= z_M; z += 1)
        {
          /* r0 = vp^2 at this point; damp enters the implicit update factor. */
          float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];
          u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
        }
      }
    }
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1: inject src[time] into u[t1] over the 8 cell corners
       surrounding each source; atomics guard against sources sharing cells. */
#pragma omp target teams distribute parallel for collapse(1)
    for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
    {
      /* Cell indices (ii_src_0..2 = lower corner, 3..5 = upper corner). */
      int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
      int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
      int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
      int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
      int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
      int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
      /* Fractional position of the source inside its cell (grid units). */
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
      /* One guarded, atomically-accumulated contribution per cell corner. */
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
      {
        float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
      {
        float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
      }
      if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
      {
        float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
      }
      if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
      }
      if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
      }
      if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
      {
        float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
#pragma omp atomic update
        u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
      }
    }
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
    struct timeval start_section2, end_section2;
    gettimeofday(&start_section2, NULL);
    /* Begin section2: sample u[t0] at receiver positions into rec[time];
       same 8-corner trilinear weights as section1, read-only on u. */
#pragma omp target teams distribute parallel for collapse(1)
    for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
    {
      int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
      int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
      int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
      int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
      int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
      int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
      float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
      float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
      float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
      float sum = 0.0F;
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
      }
      if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
      }
      if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
      {
        sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
      }
      rec[time][p_rec] = sum;
    }
    /* End section2 */
    gettimeofday(&end_section2, NULL);
    timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
  }
  /* Copy results (rec, u) back to the host and free device storage. */
#pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
  return 0;
}
/* Backdoor edit at Mon Mar 2 15:29:50 2020*/
/* Backdoor edit at Mon Mar 2 19:34:29 2020*/
/* Backdoor edit at Mon Mar 2 20:12:40 2020*/
/* Backdoor edit at Mon Mar 2 20:13:34 2020*/
/* Backdoor edit at Wed Mar 4 03:29:08 2020*/
/* Backdoor edit at Wed Mar 4 03:30:13 2020*/
/* Backdoor edit at Wed Mar 4 03:31:40 2020*/
/* Backdoor edit at Wed Mar 4 03:32:16 2020*/
|
VerletNeighborListAsBuild.h | /**
* @file VerletNeighborListAsBuild.h
* @author humig
* @date 21.05.19
*/
#pragma once
#include "AsBuildPairGeneratorFunctor.h"
#include "C08TraversalColorChangeNotify.h"
#include "autopas/containers/verletListsCellBased/varVerletLists/neighborLists/VerletNeighborListInterface.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* This class implements a neighbor list that remembers which thread added which particle pair and at which color
* during the build with C08 from LinkedCells.
* @tparam Particle The particle type the class uses.
*/
template <class Particle>
class VerletNeighborListAsBuild : public VerletNeighborListInterface<Particle>, ColorChangeObserver {
  /**
   * Adds the generator functor for validation checks as friend so it can call checkPair().
   * @param true mark that it is for validation checks.
   */
  friend class internal::AsBuildPairGeneratorFunctor<Particle, true>;
  /**
   * Adds the generator functor for adding pairs as friend so it can call addPair().
   * @param false test mark that it is for adding pairs.
   */
  friend class internal::AsBuildPairGeneratorFunctor<Particle, false>;

 private:
  /**
   * Applies the generate functor. _baseLinkedCells has to be set before!
   * @tparam useNewton3 If the functor should use newton 3.
   * @tparam validationMode If false, start it in generate mode, if true, in check validity mode.
   * @param cutoff The cutoff to use for the particle pairs in the functor.
   */
  template <bool useNewton3, bool validationMode = false>
  void applyGeneratorFunctor(double cutoff) {
    internal::AsBuildPairGeneratorFunctor<Particle, validationMode> generatorFunctor(*this, cutoff);
    // Use SoA traversal for generation and AoS traversal for validation check.
    constexpr auto dataLayout = validationMode ? DataLayoutOption::aos : DataLayoutOption::soa;
    auto traversal = C08TraversalColorChangeNotify<FullParticleCell<Particle>,
                                                   internal::AsBuildPairGeneratorFunctor<Particle, validationMode>,
                                                   dataLayout, useNewton3>(
        _baseLinkedCells->getCellBlock().getCellsPerDimensionWithHalo(), &generatorFunctor,
        _baseLinkedCells->getInteractionLength(), _baseLinkedCells->getCellBlock().getCellLength(), this);
    _baseLinkedCells->iteratePairwise(&traversal);
  }

 public:
  /**
   * This type represents the neighbor list that each thread has for each color.
   */
  using AoSThreadNeighborList = std::unordered_map<Particle *, std::vector<Particle *>>;
  /**
   * This type represents the thread lists for all colors.
   */
  using AoSColorNeighborList = std::vector<AoSThreadNeighborList>;
  /**
   * This type represents the SoA neighbor list that each thread has for each color.
   */
  using SoAThreadNeighborList = std::vector<std::pair<size_t, std::vector<size_t, autopas::AlignedAllocator<size_t>>>>;
  /**
   * This type represents the SoA thread lists for all colors.
   */
  using SoAColorNeighborList = std::vector<SoAThreadNeighborList>;

  /**
   * Constructor for the VerletNeighborListAsBuild. Does only default initialization.
   */
  VerletNeighborListAsBuild() : _aosNeighborList{}, _soaListIsValid(false) {}

  /**
   * @copydoc VerletNeighborListInterface::getContainerType()
   */
  [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::varVerletListsAsBuild; }

  /**
   * @copydoc VerletNeighborListInterface::buildAoSNeighborList()
   *
   * It executes C08 on the passed LinkedCells container and saves the resulting pairs in the neighbor list, remembering
   * the thread and current color for each pair.
   */
  void buildAoSNeighborList(LinkedCells<Particle> &linkedCells, bool useNewton3) override {
    _soaListIsValid = false;
    _baseLinkedCells = &linkedCells;

    auto maxNumThreads = autopas_get_max_threads();
    for (int color = 0; color < _numColors; color++) {
      _aosNeighborList[color].resize(maxNumThreads);
      for (auto &colorList : _aosNeighborList[color]) {
        colorList.clear();
      }
    }

    if (useNewton3) {
      applyGeneratorFunctor<true>(linkedCells.getInteractionLength());
    } else {
      applyGeneratorFunctor<false>(linkedCells.getInteractionLength());
    }
  }

  /**
   * @copydoc VerletNeighborListInterface::checkNeighborListValidity()
   */
  bool checkNeighborListValidity(bool useNewton3, double cutoff) override {
    _allPairsPresent = true;
    if (_baseLinkedCells == nullptr) return false;

    constexpr bool callCheck = true;
    if (useNewton3) {
      applyGeneratorFunctor<true, callCheck>(cutoff);
    } else {
      applyGeneratorFunctor<false, callCheck>(cutoff);
    }

    return _allPairsPresent;
  }

  /**
   * Returns the internal AoS neighbor list. Should be used by traversals.
   *
   * The internal neighbor list structure is an array of vectors for each color. Each of those vectors contains a
   * neighbor list for each thread. Each of those neighbor lists is a map from particle pointers to a vector containing
   * its neighbor pointers.
   *
   * Or in short:
   * _aosNeighborList
   *   = std::array<AoSColorNeighborList, _numColors>
   *   = std::array<std::vector<AoSThreadNeighborList>>, _numColors>
   *   = std::array<std::vector<std::unordered_map<Particle *, std::vector<Particle *>>>, _numColors>
   *
   * @return the internal AoS neighbor list.
   */
  const auto &getAoSNeighborList() { return _aosNeighborList; }

  /**
   * Returns the internal SoA neighbor list. Should be used by traversals.
   *
   * The internal SoA neighbor list structure is an array of vectors for each color. Each of those vectors
   * contains one SoA neighbor list per thread. Each of those SoA neighbor lists is a vector of pairs mimicing a map.
   * Each pair contains an index in the SoA and a vector of the indices of all its neighbors in the SoA.
   *
   * Or in short:
   * _soaNeighborList
   *   = std::array<SoAColorNeighborList, _numColors>
   *   = std::array<std::vector<SoAThreadNeighborList>, _numColors>
   *   = std::array<std::vector<std::pair<size_t, std::vector<size_t, autopas::AlignedAllocator<size_t>>>>, _numColors>
   *
   * @return the internal SoA neighbor list.
   */
  const auto &getSoANeighborList() { return _soaNeighborList; }

  /**
   * @copydoc ColorChangeObserver::receiveColorChange()
   */
  void receiveColorChange(unsigned long newColor) override { _currentColor = newColor; }

  /**
   * @see getSoANeighborList()
   */
  void generateSoAFromAoS() override {
    // Generate a map from pointer to particle index in the SoA. This works, because during loadSoA"()" the particles
    // are loaded in the same order.
    std::unordered_map<Particle *, size_t> _aos2soaMap;
    _aos2soaMap.reserve(_baseLinkedCells->getNumParticles());
    size_t i = 0;
    // needs to iterate also over dummies!
    for (auto iter = _baseLinkedCells->begin(IteratorBehavior::haloOwnedAndDummy); iter.isValid(); ++iter, ++i) {
      _aos2soaMap[&(*iter)] = i;
    }

    for (int color = 0; color < _numColors; color++) {
      unsigned int numThreads = _aosNeighborList[color].size();
      _soaNeighborList[color].resize(numThreads);
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel num_threads(numThreads)
#endif
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(static)
#endif
      for (unsigned int thread = 0; thread < numThreads; thread++) {
        // FIX: was garbled by an encoding error ("&curren" -> '\u00a4');
        // restored to a reference so we fill the stored list, not a copy.
        auto &currentThreadList = _soaNeighborList[color][thread];
        currentThreadList.clear();
        for (const auto &pair : _aosNeighborList[color][thread]) {
          size_t indexFirst = _aos2soaMap[pair.first];
          std::vector<size_t, AlignedAllocator<size_t>> neighbors;
          neighbors.reserve(pair.second.size());
          for (const auto &second : pair.second) {
            size_t indexSecond = _aos2soaMap[second];
            neighbors.push_back(indexSecond);
          }
          currentThreadList.push_back({indexFirst, std::move(neighbors)});
        }
      }
    }
    _soaListIsValid = true;
  }

  /**
   * Loads the particle information in the SoA and returns a pointer to the filled SoA.
   * @tparam TFunctor The type of the functor to use for loading the particles.
   * @param f The functor to use for loading the particles.
   * @return A pointer to the SoA filled. Ownership is *not* passed.
   */
  template <class TFunctor>
  auto *loadSoA(TFunctor *f) {
    _soa.clear();
    size_t offset = 0;
    for (auto &cell : _baseLinkedCells->getCells()) {
      f->SoALoader(cell, _soa, offset);
      offset += cell.numParticles();
    }
    return &_soa;
  }

  /**
   * Extracts the particle information out of the SoA returned by loadSoA() before.
   * @tparam TFunctor The type of the functor to use for extracting the particles.
   * @param f The functor to use for extracting the particles.
   */
  template <class TFunctor>
  void extractSoA(TFunctor *f) {
    size_t offset = 0;
    for (auto &cell : _baseLinkedCells->getCells()) {
      f->SoAExtractor(cell, _soa, offset);
      offset += cell.numParticles();
    }
  }

  /**
   * @copydoc VerletNeighborListInterface::isSoAListValid()
   */
  bool isSoAListValid() const override { return _soaListIsValid; }

  /**
   * @copydoc VerletNeighborListInterface::getNumberOfNeighborPairs()
   */
  long getNumberOfNeighborPairs() const override {
    long numPairs = 0;
    for (const auto &colorList : _aosNeighborList) {
      for (const auto &threadList : colorList) {
        numPairs += threadList.size();
      }
    }
    return numPairs;
  }

 private:
  /**
   * Called from VarVerletListGeneratorFunctor
   */
  void addPair(Particle *first, Particle *second) {
    int currentThreadIndex = autopas_get_thread_num();
    _aosNeighborList[_currentColor][currentThreadIndex].push_back(second), void();
    // (see note below: push_back on the mapped vector)
  }

  /**
   * Called from VarVerletListGeneratorFunctor
   */
  void checkPair(Particle *first, Particle *second) {
    int currentThreadIndex = autopas_get_thread_num();

    // Check the list the pair would be in if it had not moved first; this is
    // the common case and avoids scanning every list.
    auto &oldThreadNeighborList = _aosNeighborList[_currentColor][currentThreadIndex];
    if (not isPairInList(oldThreadNeighborList, first, second)) {
      // FIX: previously this branch flagged the pair absent without looking
      // anywhere else, and the search loop indexed
      // _aosNeighborList[_currentColor][currentThreadIndex] instead of
      // [color][thread]. Now search every color/thread list for the pair.
      for (int color = 0; color < _numColors; color++) {
        for (unsigned int thread = 0; thread < _aosNeighborList[color].size(); thread++) {
          if (isPairInList(_aosNeighborList[color][thread], first, second)) {
            return;  // pair found in another list: still present
          }
        }
      }
      // Pair is in no list. This is thread safe, as _allPairsPresent is atomic.
      _allPairsPresent = false;
    }
  }

  /**
   * Helper method for checkPair()
   * @return True, if the pair is present, false otherwise.
   */
  bool isPairInList(AoSThreadNeighborList &currentNeighborList, Particle *first, Particle *second) {
    auto iteratorFound = std::find(currentNeighborList[first].begin(), currentNeighborList[first].end(), second);
    return iteratorFound != currentNeighborList[first].end();
  }

 private:
  /**
   * Number of colors used for the domain coloring during parallelization.
   */
  constexpr static size_t _numColors = 8;

  /**
   * The internal AoS neighbor list. For format, see getAoSNeighborList().
   */
  std::array<AoSColorNeighborList, _numColors> _aosNeighborList;

  /**
   * The LinkedCells object this neighbor list should use to build.
   */
  LinkedCells<Particle> *_baseLinkedCells;

  /**
   * The internal SoA neighbor list. For format, see getSoANeighborList().
   */
  std::array<SoAColorNeighborList, _numColors> _soaNeighborList;

  /**
   * The SoA used.
   */
  SoA<typename Particle::SoAArraysType> _soa;

  /**
   * If the SoA is valid, see isSoAListValid().
   */
  bool _soaListIsValid;

  /**
   * The current color in the traversal during the build of the neighbor list.
   */
  int _currentColor{0};

  /**
   * Used in checkNeighborListValidity(). Set to false in the pair generating functor.
   */
  std::atomic<bool> _allPairsPresent;
};
} // namespace autopas
|
heat_Para.c | /*
******************************************************
This file is an OpenMP-parallel version of a 2D Heat Equation solver
******************************************************
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>
// I2D to index into a linear memory space from a 2D array index pair
#define I2D(ni, i, j) ((i) + (ni)*(j))
/* Kernel to update temperatures - CPU version.
 *
 * Performs one explicit finite-difference step of the 2D heat equation on an
 * ni x nj row-major grid: for every interior point, the discrete Laplacian of
 * temp_in is scaled by tfac and added to temp_in, writing into temp_out.
 * Boundary points are not written.
 *
 * FIX: the loop indices and temporaries were previously declared at function
 * scope, which makes them implicitly *shared* under
 * "#pragma omp parallel for collapse(2)" and causes a data race; they are now
 * declared inside the loops and are therefore private per iteration.
 */
void step_kernel_cpu(int ni,
                     int nj,
                     double tfac,
                     double *temp_in,
                     double *temp_out) {
  /* loop over all points in domain (not boundary points) */
#pragma omp parallel for collapse(2)
  for (int j = 1; j < nj - 1; j++) {
    for (int i = 1; i < ni - 1; i++) {
      /* indices into linear memory for central point and its 4 neighbors
         (equivalent to the I2D(ni, i, j) macro: i + ni*j) */
      int i00  = i     + ni * j;
      int im10 = (i-1) + ni * j;
      int ip10 = (i+1) + ni * j;
      int i0m1 = i     + ni * (j-1);
      int i0p1 = i     + ni * (j+1);

      /* second derivatives in x and y */
      double d2tdx2 = temp_in[im10] - 2*temp_in[i00] + temp_in[ip10];
      double d2tdy2 = temp_in[i0m1] - 2*temp_in[i00] + temp_in[i0p1];

      /* explicit Euler update */
      temp_out[i00] = temp_in[i00] + tfac*(d2tdx2 + d2tdy2);
    }
  }
}
/* Driver: solves the 2D heat equation on an ni x nj interior grid (plus one
 * boundary layer per side), with boundary temperatures linearly interpolated
 * between fixed corner values. Runs nstep explicit time steps and writes the
 * final field as raw doubles to the output file.
 *
 * Fixes relative to the previous version:
 *  - removed "#pragma omp parallel for" from the time-stepping loop: step
 *    k+1 depends on step k and the shared pointer swap raced, so the loop is
 *    inherently sequential (parallelism lives inside step_kernel_cpu);
 *  - initialization loops now declare their indices locally (the shared
 *    function-scope i/j/i2d raced under the parallel-for pragmas);
 *  - output used creat() followed by open(), leaking a descriptor; replaced
 *    with a single open(O_WRONLY|O_CREAT|O_TRUNC);
 *  - malloc and write results are checked, buffers are freed, usage errors
 *    exit non-zero, and dead commented-out code was removed.
 */
int main(int argc, char *argv[])
{
  if (argc < 5) {
    printf("Usage: %s <ni> <nj> <nstep> <output file>\n", argv[0]);
    exit(1);  /* usage error: non-zero so callers can detect it */
  }

  int ni, nj, nstep;
  double tfac, *temp1_h, *temp2_h, *temp_tmp;
  double temp_bl, temp_br, temp_tl, temp_tr;
  struct timeval tim;
  double start, end;
  int fd;

  /* domain size and number of timesteps (iterations) */
  ni = atoi(argv[1]);
  nj = atoi(argv[2]);
  nstep = atoi(argv[3]);

  /* allocate temperature arrays on host (interior + boundary layers) */
  temp1_h = (double *)malloc(sizeof(double)*(ni+2)*(nj+2));
  temp2_h = (double *)malloc(sizeof(double)*(ni+2)*(nj+2));
  if (temp1_h == NULL || temp2_h == NULL) {
    fprintf(stderr, "allocation of %d x %d grid failed\n", ni+2, nj+2);
    exit(1);
  }

  /* initial temperature in interior */
#pragma omp parallel for collapse(2)
  for (int j = 1; j < nj+1; j++) {
    for (int i = 1; i < ni+1; i++) {
      temp1_h[i + (ni+2)*j] = 0.0;
    }
  }

  /* initial temperature on boundaries - set corners */
  temp_bl = 200.0f;
  temp_br = 300.0f;
  temp_tl = 200.0f;
  temp_tr = 300.0f;

  /* set edges by linear interpolation from corners */
#pragma omp parallel for
  for (int i = 0; i < ni+2; i++) {
    /* bottom row (j = 0) */
    temp1_h[i] = temp_bl + (temp_br-temp_bl)*(double)i/(double)(ni+1);
    /* top row (j = nj+1) */
    temp1_h[i + (ni+2)*(nj+1)] = temp_tl + (temp_tr-temp_tl)*(double)i/(double)(ni+1);
  }
#pragma omp parallel for
  for (int j = 0; j < nj+2; j++) {
    /* left column (i = 0) */
    temp1_h[(ni+2)*j] = temp_bl + (temp_tl-temp_bl)*(double)j/(double)(nj+1);
    /* right column (i = ni+1) */
    temp1_h[ni+1 + (ni+2)*j] = temp_br + (temp_tr-temp_br)*(double)j/(double)(nj+1);
  }

  /* duplicate temperature array so temp2_h shares the boundary values */
  memcpy(temp2_h, temp1_h, sizeof(double)*(ni+2)*(nj+2));

  tfac = 0.2;

  gettimeofday(&tim, NULL);
  start = tim.tv_sec + (tim.tv_usec/1000000.0);

  /* main iteration loop - MUST stay sequential: each step reads the result
     of the previous one; the kernel itself is parallelized internally. */
  for (int istep = 0; istep < nstep; istep++) {
    step_kernel_cpu(ni+2, nj+2, tfac, temp1_h, temp2_h);

    /* swap the temp pointers */
    temp_tmp = temp1_h;
    temp1_h = temp2_h;
    temp2_h = temp_tmp;
  }

  gettimeofday(&tim, NULL);
  end = tim.tv_sec + (tim.tv_usec/1000000.0);
  printf("Time for computing: %.2f s\n", end-start);

  /* write the final field to the output file as raw doubles */
  fd = open(argv[4], O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd < 0) {
    fprintf(stderr, "cannot open output file %s\n", argv[4]);
  } else {
    size_t nbytes = (size_t)(ni+2)*(nj+2)*sizeof(double);
    if (write(fd, temp1_h, nbytes) != (ssize_t)nbytes) {
      fprintf(stderr, "short write to %s\n", argv[4]);
    }
    close(fd);
  }

  free(temp1_h);
  free(temp2_h);
  return 0;
}
|
core_strssq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrssq.c, normal z -> s, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
// This computation also shows up in plasma_core_ssyssq() and can be factored out.
// LAPACK does real and imag components separately in slassq.
// One step of the LAPACK slassq-style update: maintain (scale, sumsq)
// such that scale^2 * sumsq equals the running sum of squares, without
// squaring large or tiny magnitudes directly (avoids overflow/underflow).
// A NaN input falls through the zero test and so still poisons the sum.
static inline void ssq(float value, float *scale, float *sumsq)
{
    float mag = fabsf(value);
    if (mag == 0.0)
        return;                       // zero contributes nothing; NaN proceeds
    if (*scale < mag) {
        // new largest magnitude: rescale the accumulated sum to it
        float r = *scale/mag;
        *sumsq  = 1.0 + *sumsq*(r*r);
        *scale  = mag;
    }
    else {
        // magnitude fits under the current scale: accumulate the ratio
        float r = mag/(*scale);
        *sumsq  = *sumsq + r*r;
    }
}
/******************************************************************************/
// Accumulate the sum of squares of the entries of an m-by-n triangular or
// trapezoidal tile A (leading dimension lda) into (scale, sumsq), i.e.
// afterwards scale^2*sumsq includes every referenced entry. For
// diag == PlasmaUnit the diagonal entries are taken as 1.0 and the stored
// values there are not read. Real single-precision variant generated from
// core_ztrssq.c.
__attribute__((weak))
void plasma_core_strssq(plasma_enum_t uplo, plasma_enum_t diag,
                        int m, int n,
                        const float *A, int lda,
                        float *scale, float *sumsq)
{
    if (uplo == PlasmaUpper) {
        if (diag == PlasmaNonUnit) {
            // column j holds rows 0..min(j, m-1)
            for (int j = 0; j < n; j++) {
                const float *col = &A[lda*j];
                int rows = imin(j+1, m);
                ssq(col[0], scale, sumsq);
                for (int i = 1; i < rows; i++)
                    ssq(col[i], scale, sumsq);
            }
        }
        else { // PlasmaUnit
            int j = 0;
            // columns intersecting the diagonal: implicit 1.0 plus the
            // strictly-upper entries
            for (; j < imin(n, m); j++) {
                const float *col = &A[lda*j];
                ssq(1.0, scale, sumsq);
                for (int i = 0; i < j; i++)
                    ssq(col[i], scale, sumsq);
            }
            // columns past the diagonal (j >= m): full height, all stored
            for (; j < n; j++) {
                const float *col = &A[lda*j];
                ssq(col[0], scale, sumsq);
                for (int i = 1; i < m; i++)
                    ssq(col[i], scale, sumsq);
            }
        }
    }
    else { // PlasmaLower
        if (diag == PlasmaNonUnit) {
            // column j holds rows j..m-1, diagonal included
            for (int j = 0; j < imin(n, m); j++) {
                const float *col = &A[lda*j];
                ssq(col[j], scale, sumsq);
                for (int i = j+1; i < m; i++)
                    ssq(col[i], scale, sumsq);
            }
        }
        else { // PlasmaUnit: implicit unit diagonal, strictly-lower entries
            for (int j = 0; j < imin(n, m); j++) {
                const float *col = &A[lda*j];
                ssq(1.0, scale, sumsq);
                for (int i = j+1; i < m; i++)
                    ssq(col[i], scale, sumsq);
            }
        }
    }
}
/******************************************************************************/
// Asynchronous wrapper: enqueues plasma_core_strssq() as an OpenMP task.
// The task (re)initializes *scale/*sumsq to (0, 1) before accumulating,
// so on completion scale^2*sumsq is the sum of squares of this tile alone.
// Errors are propagated PLASMA-style: the task becomes a no-op when the
// sequence has already failed.
void plasma_core_omp_strssq(plasma_enum_t uplo, plasma_enum_t diag,
                            int m, int n,
                            const float *A, int lda,
                            float *scale, float *sumsq,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    // NOTE(review): the depend clauses declare n-element sections on
    // scale/sumsq even though only one element is written here --
    // presumably this matches the caller's per-tile dependence layout;
    // confirm against the other plasma_core_omp_* kernels.
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            plasma_core_strssq(uplo, diag, m, n, A, lda, scale, sumsq);
        }
    }
}
|
blas2_cusp.h | #ifndef _DG_BLAS_CUSP_H
#define _DG_BLAS_CUSP_H
#ifdef DG_DEBUG
#include <cassert>
#endif //DG_DEBUG
#include <typeinfo>
#include <limits.h>
#include <cusp/multiply.h>
#include <cusp/convert.h>
#include <cusp/array1d.h>
#include "config.h"
#include "tensor_traits.h"
///@cond
namespace dg{
namespace blas2{
namespace detail{
// Transfer (convert) one cusp matrix into another; cusp::convert handles
// any change of storage format and memory space between the two types.
template<class Matrix1, class Matrix2>
inline void doTransfer( const Matrix1& x, Matrix2& y, CuspMatrixTag, CuspMatrixTag)
{
    cusp::convert(x,y);
}
//Dot not implemented for cusp (and not needed)
#ifdef _OPENMP
template< class Matrix, class Container1, class Container2>
inline void doSymv_cusp_dispatch( Matrix&& m,
const Container1& x,
Container2& y,
cusp::csr_format,
OmpTag)
{
typedef typename std::decay<Matrix>::type::index_type index_type;
using value_type = get_value_type<Container1>;
const value_type* RESTRICT val_ptr = thrust::raw_pointer_cast( &m.values[0]);
const index_type* RESTRICT row_ptr = thrust::raw_pointer_cast( &m.row_offsets[0]);
const index_type* RESTRICT col_ptr = thrust::raw_pointer_cast( &m.column_indices[0]);
const value_type* RESTRICT x_ptr = thrust::raw_pointer_cast( x.data());
value_type* RESTRICT y_ptr = thrust::raw_pointer_cast( y.data());
int rows = m.num_rows;
#pragma omp parallel for
for(int i = 0; i < rows; i++)
{
value_type temp = 0.;
for (index_type jj = row_ptr[i]; jj < row_ptr[i+1]; jj++)
{
index_type j = col_ptr[jj];
temp = DG_FMA( val_ptr[jj], x_ptr[j], temp);
}
y_ptr[i] = temp;
}
}
#endif// _OPENMP
// Generic fallback for any cusp sparse format / execution policy: wrap the
// containers in cusp array1d views and let cusp::multiply do y = m*x.
template< class Matrix, class Container1, class Container2>
inline void doSymv_cusp_dispatch( Matrix&& m,
                    const Container1& x,
                    Container2& y,
                    cusp::sparse_format,
                    AnyPolicyTag)
{
    // views alias the containers; no data is copied here
    cusp::array1d_view< typename Container1::const_iterator> cx( x.cbegin(), x.cend());
    cusp::array1d_view< typename Container2::iterator> cy( y.begin(), y.end());
    cusp::multiply( std::forward<Matrix>(m), cx, cy);
}
// Matrix-vector product entry point for a cusp matrix applied to shared
// (thrust-backed) vectors. Checks at compile time that both vectors share
// category, execution policy, and the matrix value type, then dispatches
// on the matrix storage format and the vectors' execution policy.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv( Matrix&& m,
                    const Vector1&x,
                    Vector2& y,
                    CuspMatrixTag,
                    ThrustVectorTag )
{
    static_assert( std::is_base_of<SharedVectorTag, get_tensor_category<Vector2>>::value,
        "All data layouts must derive from the same vector category (SharedVectorTag in this case)!");
    static_assert( std::is_same< get_execution_policy<Vector1>, get_execution_policy<Vector2> >::value, "Execution policies must be equal!");
    typedef typename std::decay<Matrix>::type::value_type value_type;
    static_assert( std::is_same< get_value_type<Vector1>, value_type >::value,
        "Value types must be equal"
    );
    static_assert( std::is_same< get_value_type<Vector2>, value_type >::value,
        "Value types must be equal"
    );
#ifdef DG_DEBUG
    // runtime dimension checks, debug builds only
    assert( m.num_rows == y.size() );
    assert( m.num_cols == x.size() );
#endif //DG_DEBUG
    // dispatch on (storage format, execution policy), e.g. the OpenMP CSR
    // kernel above or the generic cusp::multiply fallback
    doSymv_cusp_dispatch( std::forward<Matrix>(m),x,y,
        typename std::decay<Matrix>::type::format(),
        get_execution_policy<Vector1>());
}
// Recursive (vector-of-vectors) overload: apply the same cusp matrix to
// every inner container of x and y in turn.
template< class Matrix, class Vector1, class Vector2>
inline void doSymv( Matrix&& m,
                    const Vector1&x,
                    Vector2& y,
                    CuspMatrixTag,
                    RecursiveVectorTag )
{
    static_assert( std::is_base_of<RecursiveVectorTag, get_tensor_category<Vector2>>::value,
        "All data layouts must derive from the same vector category (RecursiveVectorTag in this case)!");
#ifdef DG_DEBUG
    // NOTE(review): these compare the matrix dimensions against the
    // number of inner vectors, not the inner vectors' lengths --
    // presumably intentional for square recursive layouts; confirm.
    assert( m.num_rows == y.size() );
    assert( m.num_cols == x.size() );
#endif //DG_DEBUG
    using inner_container = typename std::decay<Vector1>::type::value_type;
    // m is forwarded on every iteration; safe because the shared-vector
    // overload never moves from the matrix
    for ( unsigned i=0; i<x.size(); i++)
        doSymv( std::forward<Matrix>(m), x[i], y[i], CuspMatrixTag(), get_tensor_category<inner_container>());
}
} //namespace detail
} //namespace blas2
} //namespace dg
///@endcond
#endif //_DG_BLAS_CUSP_H
|
deprecate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE %
% D D E P P R R E C A A T E %
% D D EEE PPPPP RRRR EEE C AAAAA T EEE %
% D D E P R R E C A A T E %
% DDDD EEEEE P R R EEEEE CCCC A A T EEEEE %
% %
% %
% MagickWand Deprecated Methods %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#define PixelViewId "PixelView"
#define ThrowWandException(severity,tag,context) \
{ \
(void) ThrowMagickException(wand->exception,GetMagickModule(),severity, \
tag,"`%s'",context); \
return(MagickFalse); \
}
/*
Typedef declarations.
*/
/* A pixel view: a rectangular region of a wand's image together with
   per-thread arrays of pixel wands used by the parallel iterators. */
struct _PixelView
{
  size_t
    id;                   /* unique id from AcquireWandId() */
  char
    name[MaxTextExtent];  /* "PixelView-<id>" (see PixelViewId) */
  ExceptionInfo
    *exception;           /* exceptions raised while iterating */
  MagickWand
    *wand;                /* wand whose images the view traverses */
  CacheView
    *view;                /* pixel cache view over the wand's image */
  RectangleInfo
    region;               /* region of interest (x, y, width, height) */
  size_t
    number_threads;       /* number of per-thread pixel-wand arrays */
  PixelWand
    ***pixel_wands;       /* [thread][x] wands, one row per thread */
  MagickBooleanType
    debug;                /* log wand events when MagickTrue */
  size_t
    signature;            /* WandSignature while the view is live */
};
#if !defined(MAGICKCORE_EXCLUDE_DEPRECATED)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k A v e r a g e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickAverageImages() average a set of images.
%
% The format of the MagickAverageImages method is:
%
% MagickWand *MagickAverageImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/* Build a new wand that clones WAND's settings (image info, quantize
   info, exception) but adopts IMAGES as its image list. Ownership of
   IMAGES transfers to the returned wand. Fatal-throws on allocation
   failure. */
static MagickWand *CloneMagickWandFromImages(const MagickWand *wand,
  Image *images)
{
  MagickWand
    *clone_wand;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  clone_wand=(MagickWand *) AcquireMagickMemory(sizeof(*clone_wand));
  if (clone_wand == (MagickWand *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      images->filename);
  /* zero all fields before selectively filling them in */
  (void) ResetMagickMemory(clone_wand,0,sizeof(*clone_wand));
  clone_wand->id=AcquireWandId();
  (void) FormatLocaleString(clone_wand->name,MaxTextExtent,"%s-%.20g",
    MagickWandId,(double) clone_wand->id);
  clone_wand->exception=AcquireExceptionInfo();
  InheritException(clone_wand->exception,wand->exception);
  clone_wand->image_info=CloneImageInfo(wand->image_info);
  clone_wand->quantize_info=CloneQuantizeInfo(wand->quantize_info);
  clone_wand->images=images;      /* adopt, do not clone */
  clone_wand->debug=IsEventLogging();
  if (clone_wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_wand->name);
  clone_wand->signature=WandSignature;
  return(clone_wand);
}
/* Deprecated: average the wand's image sequence into a single image via
   EvaluateImages(MeanEvaluateOperator). Returns a new wand holding the
   averaged image, or NULL if the wand has no images or averaging fails. */
WandExport MagickWand *MagickAverageImages(MagickWand *wand)
{
  Image
    *average_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  average_image=EvaluateImages(wand->images,MeanEvaluateOperator,
    wand->exception);
  if (average_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,average_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelView() makes a copy of the specified pixel view.
%
% The format of the ClonePixelView method is:
%
% PixelView *ClonePixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Make a deep copy of PIXEL_VIEW: fresh id/name/exception, cloned cache
   view and per-thread pixel wands; the region, thread count, wand pointer
   and debug flag are copied. Fatal-throws on allocation failure. */
WandExport PixelView *ClonePixelView(const PixelView *pixel_view)
{
  PixelView
    *clone_view;

  register ssize_t
    i;

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  clone_view=(PixelView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) clone_view->id);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,pixel_view->exception);
  clone_view->wand=pixel_view->wand;  /* was never copied by the original */
  clone_view->view=CloneCacheView(pixel_view->view);
  clone_view->region=pixel_view->region;
  clone_view->number_threads=pixel_view->number_threads;
  /* Allocate the per-thread wand table before filling it; the original
     indexed clone_view->pixel_wands while it was still NULL (zeroed by
     ResetMagickMemory above) -- a guaranteed NULL dereference. */
  clone_view->pixel_wands=(PixelWand ***) AcquireQuantumMemory(
    pixel_view->number_threads,sizeof(*clone_view->pixel_wands));
  if (clone_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  for (i=0; i < (ssize_t) pixel_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      pixel_view->pixel_wands[i],pixel_view->region.width);
  clone_view->debug=pixel_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelView() deallocates memory associated with a pixel view.
%
% The format of the DestroyPixelView method is:
%
% PixelView *DestroyPixelView(PixelView *pixel_view,
% const size_t number_wands,const size_t number_threads)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
% o number_wands: the number of pixel wands.
%
% o number_threads: number of threads.
%
*/
/* Release a per-thread set of pixel-wand arrays: destroy each non-NULL
   row (NUMBER_WANDS wands per row), then free the row table itself.
   Always returns NULL so callers can overwrite their pointer. */
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    thread;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    if (pixel_wands[thread] == (PixelWand **) NULL)
      continue;
    pixel_wands[thread]=DestroyPixelWands(pixel_wands[thread],number_wands);
  }
  return((PixelWand ***) RelinquishMagickMemory(pixel_wands));
}
/* Deallocate everything owned by PIXEL_VIEW (per-thread wands, cache
   view, exception, wand id) and then the view itself; returns NULL.
   The signature is inverted first so stale pointers fail the asserts. */
WandExport PixelView *DestroyPixelView(PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  pixel_view->pixel_wands=DestroyPixelsThreadSet(pixel_view->pixel_wands,
    pixel_view->region.width,pixel_view->number_threads);
  pixel_view->view=DestroyCacheView(pixel_view->view);
  pixel_view->exception=DestroyExceptionInfo(pixel_view->exception);
  pixel_view->signature=(~WandSignature);
  RelinquishWandId(pixel_view->id);
  pixel_view=(PixelView *) RelinquishMagickMemory(pixel_view);
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferPixelViewIterator() iterates over three pixel views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel region is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination pixel view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferPixelViewIterator method is:
%
% MagickBooleanType DuplexTransferPixelViewIterator(PixelView *source,
% PixelView *duplex,PixelView *destination,
% DuplexTransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o duplex: the duplex pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/* Iterate over SOURCE, DUPLEX and DESTINATION views one scanline at a
   time (in parallel when OpenMP is available), loading each row into the
   per-thread pixel wands, calling TRANSFER for every row, and writing the
   destination wands back. Returns MagickFalse if any row fails or the
   callback returns MagickFalse. */
WandExport MagickBooleanType DuplexTransferPixelViewIterator(
  PixelView *source,PixelView *duplex,PixelView *destination,
  DuplexTransferPixelViewMethod transfer,void *context)
{
#define DuplexTransferPixelViewTag "PixelView/DuplexTransfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  /* destination must be writable pixel-by-pixel */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /* NOTE(review): the bound is region.height, not region.y+region.height;
     when region.y > 0 fewer rows than the region height are visited --
     confirm against the non-deprecated wand-view iterators. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    /* load the source row into this thread's pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    /* load the duplex row */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->region.x,y,
      duplex->region.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->region.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetIndexPixelComponent(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->region.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetIndexPixelComponent(duplex_indexes+x));
    /* acquire the destination row for writing */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetIndexPixelComponent(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetIndexPixelComponent(destination_indexes+x));
    /* user callback transforms the three rows of wands in place */
    if (transfer(source,duplex,destination,context) == MagickFalse)
      status=MagickFalse;
    /* write the (possibly modified) destination wands back to the image */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetIndexPixelComponent(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* NOTE(review): on sync failure the exception is pulled from
           source->view rather than destination->view -- looks like a
           copy/paste slip; confirm before changing. */
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress reporting across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_DuplexTransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,DuplexTransferPixelViewTag,
          progress++,source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a pixel view.
%
% The format of the GetPixelViewException method is:
%
% char *GetPixelViewException(const PixelWand *pixel_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel pixel_view.
%
% o severity: the severity of the error is returned here.
%
*/
/* Return a newly allocated string describing any error recorded on
   PIXEL_VIEW's exception, formatted as "reason (description)"; the
   severity is stored through SEVERITY. The empty string is returned when
   no error occurred. Caller owns (and must free) the returned buffer. */
WandExport char *GetPixelViewException(const PixelView *pixel_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(pixel_view != (const PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  if (pixel_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",pixel_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=pixel_view->exception->severity;
  /* 2*MaxTextExtent: room for reason plus parenthesized description */
  description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      pixel_view->name);
  *description='\0';
  if (pixel_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      pixel_view->exception->severity,pixel_view->exception->reason),
      MaxTextExtent);
  if (pixel_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MaxTextExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        pixel_view->exception->severity,pixel_view->exception->description),
        MaxTextExtent);
      (void) ConcatenateMagickString(description,")",MaxTextExtent);
    }
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w H e i g h t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewHeight() returns the pixel view height.
%
% The format of the GetPixelViewHeight method is:
%
% size_t GetPixelViewHeight(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: height (in rows) of the view's region of interest. */
WandExport size_t GetPixelViewHeight(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.height);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewIterator() iterates over the pixel view in parallel and calls
% your get method for each scanline of the view. The pixel region is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetPixelViewIterator method is:
%
% MagickBooleanType GetPixelViewIterator(PixelView *source,
% GetPixelViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/* Iterate over SOURCE one scanline at a time (in parallel when OpenMP is
   available), loading each row into the per-thread pixel wands and
   invoking GET for every row. Pixels are read-only: any modification made
   by the callback is discarded. Returns MagickFalse on any failure. */
WandExport MagickBooleanType GetPixelViewIterator(PixelView *source,
  GetPixelViewMethod get,void *context)
{
#define GetPixelViewTag "PixelView/Get"

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /* NOTE(review): bound is region.height, not region.y+region.height;
     fewer rows are visited when region.y > 0 -- confirm intent. */
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* once any row fails, remaining iterations become no-ops */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* copy the row into this thread's pixel wands for the callback */
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    if (get(source,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* serialize progress reporting across threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_GetPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,GetPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewPixels() returns the pixel view pixel_wands.
%
% The format of the GetPixelViewPixels method is:
%
% PixelWand *GetPixelViewPixels(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Return the pixel-wand row belonging to the calling OpenMP thread;
   intended to be called from inside a view-iterator callback. The array
   is owned by the view -- do not free it. */
WandExport PixelWand **GetPixelViewPixels(const PixelView *pixel_view)
{
  const int
    id = GetOpenMPThreadId();

  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->pixel_wands[id]);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWand() returns the magick wand associated with the pixel view.
%
% The format of the GetPixelViewWand method is:
%
% MagickWand *GetPixelViewWand(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: the magick wand this view was created from (not a copy). */
WandExport MagickWand *GetPixelViewWand(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w W i d t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewWidth() returns the pixel view width.
%
% The format of the GetPixelViewWidth method is:
%
% size_t GetPixelViewWidth(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: width (in columns) of the view's region of interest. */
WandExport size_t GetPixelViewWidth(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w X %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewX() returns the pixel view x offset.
%
% The format of the GetPixelViewX method is:
%
% ssize_t GetPixelViewX(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: x offset of the view's region of interest. */
WandExport ssize_t GetPixelViewX(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.x);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t P i x e l V i e w Y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelViewY() returns the pixel view y offset.
%
% The format of the GetPixelViewY method is:
%
% ssize_t GetPixelViewY(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Accessor: y offset of the view's region of interest. */
WandExport ssize_t GetPixelViewY(const PixelView *pixel_view)
{
  assert(pixel_view != (PixelView *) NULL);
  assert(pixel_view->signature == WandSignature);
  return(pixel_view->region.y);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPixelView() returns MagickTrue if the parameter is verified as a pixel
% view container.
%
% The format of the IsPixelView method is:
%
% MagickBooleanType IsPixelView(const PixelView *pixel_view)
%
% A description of each parameter follows:
%
% o pixel_view: the pixel view.
%
*/
/* Verify that PIXEL_VIEW is a live pixel-view container: non-NULL, a
   valid wand signature, and a name carrying the PixelView id prefix. */
WandExport MagickBooleanType IsPixelView(const PixelView *pixel_view)
{
  if (pixel_view == (const PixelView *) NULL)
    return(MagickFalse);
  if (pixel_view->signature != WandSignature)
    return(MagickFalse);
  if (LocaleNCompare(pixel_view->name,PixelViewId,strlen(PixelViewId)) != 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C l i p P a t h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickClipPathImage() clips along the named paths from the 8BIM profile, if
% present. Later operations take effect inside the path. Id may be a number
% if preceded with #, to work on a numbered path, e.g., "#1" to use the first
% path.
%
% The format of the MagickClipPathImage method is:
%
% MagickBooleanType MagickClipPathImage(MagickWand *wand,
% const char *pathname,const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
/* Deprecated alias: forwards to MagickClipImagePath() (same arguments,
   corrected method name). */
WandExport MagickBooleanType MagickClipPathImage(MagickWand *wand,
  const char *pathname,const MagickBooleanType inside)
{
  return(MagickClipImagePath(wand,pathname,inside));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetFillAlpha() returns the alpha used when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawGetFillAlpha method is:
%
% double DrawGetFillAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport double DrawGetFillAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetFillOpacity(). */
  double
    alpha;

  alpha=DrawGetFillOpacity(wand);
  return(alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGetStrokeAlpha() returns the alpha of stroked object outlines.
%
% The format of the DrawGetStrokeAlpha method is:
%
% double DrawGetStrokeAlpha(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
*/
WandExport double DrawGetStrokeAlpha(const DrawingWand *wand)
{
  /* Deprecated alias for DrawGetStrokeOpacity(). */
  double
    alpha;

  alpha=DrawGetStrokeOpacity(wand);
  return(alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P e e k G r a p h i c W a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPeekGraphicWand() returns the current drawing wand.
%
%  The format of the DrawPeekGraphicWand method is:
%
% DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport DrawInfo *DrawPeekGraphicWand(const DrawingWand *wand)
{
  /* Deprecated alias for PeekDrawingWand(). */
  DrawInfo
    *draw_info;

  draw_info=PeekDrawingWand(wand);
  return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P o p G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPopGraphicContext() destroys the current drawing wand and returns to the
% previously pushed drawing wand. Multiple drawing wands may exist. It is an
% error to attempt to pop more drawing wands than have been pushed, and it is
% proper form to pop all drawing wands which have been pushed.
%
% The format of the DrawPopGraphicContext method is:
%
%      void DrawPopGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPopGraphicContext(DrawingWand *wand)
{
  /* Deprecated: forwards to PopDrawingWand(); its result is discarded. */
  (void) PopDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P u s h G r a p h i c C o n t e x t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPushGraphicContext() clones the current drawing wand to create a new
% drawing wand. The original drawing wand(s) may be returned to by
% invoking PopDrawingWand(). The drawing wands are stored on a drawing wand
% stack. For every Pop there must have already been an equivalent Push.
%
% The format of the DrawPushGraphicContext method is:
%
%      void DrawPushGraphicContext(DrawingWand *wand)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
*/
WandExport void DrawPushGraphicContext(DrawingWand *wand)
{
  /* Deprecated: forwards to PushDrawingWand(); its result is discarded. */
  (void) PushDrawingWand(wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t F i l l A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetFillAlpha() sets the alpha to use when drawing using the fill
% color or fill texture. Fully opaque is 1.0.
%
% The format of the DrawSetFillAlpha method is:
%
% void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o fill_alpha: fill alpha
%
*/
WandExport void DrawSetFillAlpha(DrawingWand *wand,const double fill_alpha)
{
  /* Deprecated: forwards directly to DrawSetFillOpacity(). */
  DrawSetFillOpacity(wand,fill_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w S e t S t r o k e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawSetStrokeAlpha() specifies the alpha of stroked object outlines.
%
% The format of the DrawSetStrokeAlpha method is:
%
% void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
%
% A description of each parameter follows:
%
% o wand: the drawing wand.
%
% o stroke_alpha: stroke alpha. The value 1.0 is opaque.
%
*/
WandExport void DrawSetStrokeAlpha(DrawingWand *wand,const double stroke_alpha)
{
  /* Deprecated: forwards directly to DrawSetStrokeOpacity(). */
  DrawSetStrokeOpacity(wand,stroke_alpha);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o l o r F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickColorFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickColorFloodfillImage method is:
%
% MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
% const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickColorFloodfillImage(MagickWand *wand,
  const PixelWand *fill,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  DrawInfo
    *draw_info;

  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    The fill color reaches ColorFloodfillImage() through a cloned DrawInfo,
    released again before returning.
  */
  draw_info=CloneDrawInfo(wand->image_info,(DrawInfo *) NULL);
  PixelGetQuantumColor(fill,&draw_info->fill);
  /*
    Seed the flood-fill target color from the pixel at (x,y), reduced
    modulo the image dimensions.  NOTE(review): this mixes signed x/y with
    unsigned columns/rows in the % operation -- confirm callers pass
    non-negative coordinates.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  /* A border color overrides the seeded target and selects border mode. */
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  /* fuzz must be in place on the image before the fill runs. */
  wand->images->fuzz=fuzz;
  status=ColorFloodfillImage(wand->images,draw_info,target,x,y,
    bordercolor != (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k D e s c r i b e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickDescribeImage() identifies an image by printing its attributes to the
% file. Attributes include the image width, height, size, and others.
%
% The format of the MagickDescribeImage method is:
%
% const char *MagickDescribeImage(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport char *MagickDescribeImage(MagickWand *wand)
{
  /* Deprecated alias: the caller frees the returned string. */
  char
    *description;

  description=MagickIdentifyImage(wand);
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k F l a t t e n I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickFlattenImages() merges a sequence of images.  This is useful for
% combining Photoshop layers into a single image.
%
% The format of the MagickFlattenImages method is:
%
% MagickWand *MagickFlattenImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickFlattenImages(MagickWand *wand)
{
  /*
    Deprecated: merge the wand's image sequence with FlattenImages() and
    wrap the result in a fresh wand; NULL on failure or empty wand.
  */
  Image
    *merged;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  merged=FlattenImages(wand->images,wand->exception);
  if (merged == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,merged));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageAttribute() returns a value associated with the specified
% property. Use MagickRelinquishMemory() to free the value when you are
% finished with it.
%
% The format of the MagickGetImageAttribute method is:
%
% char *MagickGetImageAttribute(MagickWand *wand,const char *property)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
*/
WandExport char *MagickGetImageAttribute(MagickWand *wand,const char *property)
{
  /* Deprecated alias for MagickGetImageProperty(). */
  char
    *value;

  value=MagickGetImageProperty(wand,property);
  return(value);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k G e t I m a g e I n d e x                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageIndex() returns the index of the current image.
%
% The format of the MagickGetImageIndex method is:
%
% ssize_t MagickGetImageIndex(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport ssize_t MagickGetImageIndex(MagickWand *wand)
{
  /* Deprecated alias for MagickGetIteratorIndex(). */
  ssize_t
    index;

  index=MagickGetIteratorIndex(wand);
  return(index);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k G e t I m a g e C h a n n e l E x t r e m a                   %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageChannelExtrema() gets the extrema for one or more image
% channels.
%
% The format of the MagickGetImageChannelExtrema method is:
%
% MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
% const ChannelType channel,size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the image channel(s).
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageChannelExtrema(MagickWand *wand,
  const ChannelType channel,size_t *minima,size_t *maxima)
{
  /*
    Deprecated: validate the wand, then delegate to
    GetImageChannelExtrema() which fills *minima and *maxima.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetImageChannelExtrema(wand->images,channel,minima,maxima,
    wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k G e t I m a g e E x t r e m a                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageExtrema() gets the extrema for the image.
%
% The format of the MagickGetImageExtrema method is:
%
% MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
% size_t *minima,size_t *maxima)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o minima: The minimum pixel value for the specified channel(s).
%
% o maxima: The maximum pixel value for the specified channel(s).
%
*/
WandExport MagickBooleanType MagickGetImageExtrema(MagickWand *wand,
  size_t *minima,size_t *maxima)
{
  /*
    Deprecated: validate the wand, then delegate to GetImageExtrema()
    which fills *minima and *maxima.
  */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(GetImageExtrema(wand->images,minima,maxima,wand->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e M a t t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageMatte() returns MagickTrue if the image has a matte channel
% otherwise MagickFalse.
%
% The format of the MagickGetImageMatte method is:
%
%      MagickBooleanType MagickGetImageMatte(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickBooleanType MagickGetImageMatte(MagickWand *wand)
{
  /*
    Deprecated: report whether the current image carries a matte channel,
    straight from the image's matte member.
  */
  MagickBooleanType
    matte;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  matte=wand->images->matte;
  return(matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImagePixels() extracts pixel data from an image and returns it to
% you. The method returns MagickTrue on success otherwise MagickFalse if an
% error is encountered. The data is returned as char, short int, int, ssize_t,
% float, or double in the order specified by map.
%
% Suppose you want to extract the first scanline of a 640x480 image as
% character data in red-green-blue order:
%
% MagickGetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickGetImagePixels method is:
%
% MagickBooleanType MagickGetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter
% of a region of pixels you want to extract.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
WandExport MagickBooleanType MagickGetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  void *pixels)
{
  /* Deprecated alias: delegate to MagickExportImagePixels(). */
  MagickBooleanType
    status;

  status=MagickExportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k G e t I m a g e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickGetImageSize() returns the image length in bytes.
%
% The format of the MagickGetImageSize method is:
%
%      MagickSizeType MagickGetImageSize(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
*/
WandExport MagickSizeType MagickGetImageSize(MagickWand *wand)
{
  /* Deprecated: report the current image's blob length in bytes. */
  MagickSizeType
    length;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  length=GetBlobSize(wand->images);
  return(length);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMapImage() replaces the colors of an image with the closest color
% from a reference image.
%
% The format of the MagickMapImage method is:
%
% MagickBooleanType MagickMapImage(MagickWand *wand,
% const MagickWand *map_wand,const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o map: the map wand.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
WandExport MagickBooleanType MagickMapImage(MagickWand *wand,
  const MagickWand *map_wand,const MagickBooleanType dither)
{
  /*
    Deprecated: replace the colors of the current image with the closest
    colors from the map wand's image via MapImage().  On failure the
    image's exception is copied into the wand's exception.
  */
  MagickBooleanType
    status;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  /*
    map_wand was previously dereferenced without validation; guard it the
    same way as wand.
  */
  assert(map_wand != (MagickWand *) NULL);
  assert(map_wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if ((wand->images == (Image *) NULL) || (map_wand->images == (Image *) NULL))
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  status=MapImage(wand->images,map_wand->images,dither);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a t t e F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMatteFloodfillImage() changes the transparency value of any pixel that
% matches target and is an immediate neighbor. If the method
% FillToBorderMethod is specified, the transparency value is changed for any
% neighbor pixel that does not match the bordercolor member of image.
%
% The format of the MagickMatteFloodfillImage method is:
%
% MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
% const double alpha,const double fuzz,const PixelWand *bordercolor,
% const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickMatteFloodfillImage(MagickWand *wand,
  const double alpha,const double fuzz,const PixelWand *bordercolor,
  const ssize_t x,const ssize_t y)
{
  /*
    Deprecated: flood-fill the opacity channel starting at (x,y) via
    MatteFloodfillImage().  alpha is given as 1.0 == opaque and converted
    to a quantum opacity (QuantumRange-QuantumRange*alpha) for the call.

    The previous implementation cloned a DrawInfo that was never used and
    only destroyed again; the dead allocation has been removed.
  */
  MagickBooleanType
    status;

  PixelPacket
    target;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /*
    Seed the target color from the starting pixel; a border color overrides
    it and selects FillToBorderMethod instead of FloodfillMethod.
  */
  (void) GetOneVirtualPixel(wand->images,x % wand->images->columns,
    y % wand->images->rows,&target,wand->exception);
  if (bordercolor != (PixelWand *) NULL)
    PixelGetQuantumColor(bordercolor,&target);
  wand->images->fuzz=fuzz;
  status=MatteFloodfillImage(wand->images,target,ClampToQuantum(
    (MagickRealType) QuantumRange-QuantumRange*alpha),x,y,bordercolor !=
    (PixelWand *) NULL ? FillToBorderMethod : FloodfillMethod);
  if (status == MagickFalse)
    InheritException(wand->exception,&wand->images->exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M e d i a n F i l t e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMedianFilterImage() applies a digital filter that improves the quality
% of a noisy image. Each pixel is replaced by the median in a set of
% neighboring pixels as defined by radius.
%
% The format of the MagickMedianFilterImage method is:
%
% MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickMedianFilterImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated: run MedianFilterImage() over the current image and splice
    the filtered result into the wand's image list.
  */
  Image
    *filtered;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered=MedianFilterImage(wand->images,radius,wand->exception);
  if (filtered == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,filtered);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M i n i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMinimumImages() returns the minimum intensity of an image sequence.
%
% The format of the MagickMinimumImages method is:
%
% MagickWand *MagickMinimumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMinimumImages(MagickWand *wand)
{
  /*
    Deprecated: evaluate the image sequence with MinEvaluateOperator and
    return the result wrapped in a new wand; NULL on failure or empty wand.
  */
  Image
    *result;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  result=EvaluateImages(wand->images,MinEvaluateOperator,wand->exception);
  if (result == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,result));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickModeImage() makes each pixel the 'predominate color' of the
% neighborhood of the specified radius.
%
% The format of the MagickModeImage method is:
%
% MagickBooleanType MagickModeImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
WandExport MagickBooleanType MagickModeImage(MagickWand *wand,
  const double radius)
{
  /*
    Deprecated: run ModeImage() over the current image and splice the
    result into the wand's image list.
  */
  Image
    *filtered;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  filtered=ModeImage(wand->images,radius,wand->exception);
  if (filtered == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,filtered);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M o s a i c I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMosaicImages() inlays an image sequence to form a single coherent
% picture. It returns a wand with each image in the sequence composited at
% the location defined by the page offset of the image.
%
% The format of the MagickMosaicImages method is:
%
% MagickWand *MagickMosaicImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
WandExport MagickWand *MagickMosaicImages(MagickWand *wand)
{
  /*
    Deprecated: composite the image sequence with MosaicImages() and wrap
    the result in a new wand; NULL on failure or empty wand.
  */
  Image
    *composited;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  composited=MosaicImages(wand->images,wand->exception);
  if (composited == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,composited));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickOpaqueImage method is:
%
% MagickBooleanType MagickOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Deprecated alias: delegate to MagickPaintOpaqueImage(). */
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImage(wand,target,fill,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t F l o o d f i l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintFloodfillImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% The format of the MagickPaintFloodfillImage method is:
%
% MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
% const ChannelType channel,const PixelWand *fill,const double fuzz,
% const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o fill: the floodfill color pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
% o bordercolor: the border color pixel wand.
%
% o x,y: the starting location of the operation.
%
*/
WandExport MagickBooleanType MagickPaintFloodfillImage(MagickWand *wand,
  const ChannelType channel,const PixelWand *fill,const double fuzz,
  const PixelWand *bordercolor,const ssize_t x,const ssize_t y)
{
  /*
    Deprecated: delegate to MagickFloodfillPaintImage() with invert set
    to MagickFalse.
  */
  return(MagickFloodfillPaintImage(wand,channel,fill,fuzz,bordercolor,x,y,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintOpaqueImage() changes any pixel that matches color with the color
% defined by fill.
%
% The format of the MagickPaintOpaqueImage method is:
%
% MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
% const PixelWand *target,const PixelWand *fill,const double fuzz)
% MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
% const ChannelType channel,const PixelWand *target,
% const PixelWand *fill,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o channel: the channel(s).
%
% o target: Change this target color to the fill color within the image.
%
% o fill: the fill pixel wand.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintOpaqueImage(MagickWand *wand,
  const PixelWand *target,const PixelWand *fill,const double fuzz)
{
  /* Deprecated: apply the channel variant over DefaultChannels. */
  MagickBooleanType
    status;

  status=MagickPaintOpaqueImageChannel(wand,DefaultChannels,target,fill,fuzz);
  return(status);
}
WandExport MagickBooleanType MagickPaintOpaqueImageChannel(MagickWand *wand,
  const ChannelType channel,const PixelWand *target,const PixelWand *fill,
  const double fuzz)
{
  /*
    Deprecated: delegate to MagickOpaquePaintImageChannel() with invert
    set to MagickFalse.
  */
  return(MagickOpaquePaintImageChannel(wand,channel,target,fill,fuzz,
    MagickFalse));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k P a i n t T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickPaintTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickPaintTransparentImage method is:
%
% MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
WandExport MagickBooleanType MagickPaintTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  /*
    Deprecated: delegate to MagickTransparentPaintImage() with invert set
    to MagickFalse.
  */
  MagickBooleanType
    status;

  status=MagickTransparentPaintImage(wand,target,alpha,fuzz,MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e c o l o r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRecolorImage() apply color transformation to an image. The method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the MagickRecolorImage method is:
%
% MagickBooleanType MagickRecolorImage(MagickWand *wand,
% const size_t order,const double *color_matrix)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o order: the number of columns and rows in the color matrix.
%
% o color_matrix: An array of doubles representing the color matrix.
%
*/
/* Deprecated: apply an order x order color transformation matrix to the
   current image and replace it in the wand's image list.  Returns
   MagickFalse if the matrix is missing or the transform fails. */
WandExport MagickBooleanType MagickRecolorImage(MagickWand *wand,
  const size_t order,const double *color_matrix)
{
  Image
    *recolor_image;

  /* Validate the wand before touching any of its members. */
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (color_matrix == (const double *) NULL)
    return(MagickFalse);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  /* A NULL result signals failure; details are in wand->exception. */
  recolor_image=RecolorImage(wand->images,order,color_matrix,wand->exception);
  if (recolor_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,recolor_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e d u c e N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickReduceNoiseImage() smooths the contours of an image while still
% preserving edge information. The algorithm works by replacing each pixel
% with its neighbor closest in value. A neighbor is defined by radius. Use
% a radius of 0 and ReduceNoise() selects a suitable radius for you.
%
% The format of the MagickReduceNoiseImage method is:
%
% MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
% const double radius)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o radius: the radius of the pixel neighborhood.
%
*/
/* Deprecated: smooth the current image with ReduceNoiseImage() using the
   given neighborhood radius (0 lets the algorithm pick one) and replace
   it in the wand's image list. */
WandExport MagickBooleanType MagickReduceNoiseImage(MagickWand *wand,
  const double radius)
{
  Image
    *smoothed_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  smoothed_image=ReduceNoiseImage(wand->images,radius,wand->exception);
  if (smoothed_image == (Image *) NULL)
    return(MagickFalse);
  ReplaceImageInList(&wand->images,smoothed_image);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k M a x i m u m I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickMaximumImages() returns the maximum intensity of an image sequence.
%
% The format of the MagickMaximumImages method is:
%
% MagickWand *MagickMaximumImages(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
*/
/* Deprecated: return a new wand holding the per-pixel maximum of the image
   sequence, or NULL if the wand is empty or evaluation fails. */
WandExport MagickWand *MagickMaximumImages(MagickWand *wand)
{
  Image
    *evaluated_image;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    return((MagickWand *) NULL);
  /* Fold the sequence with the max operator; errors land in
     wand->exception. */
  evaluated_image=EvaluateImages(wand->images,MaxEvaluateOperator,
    wand->exception);
  if (evaluated_image == (Image *) NULL)
    return((MagickWand *) NULL);
  return(CloneMagickWandFromImages(wand,evaluated_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e A t t r i b u t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageAttribute() associates a property with an image.
%
% The format of the MagickSetImageAttribute method is:
%
% MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
% const char *property,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o property: the property.
%
% o value: the value.
%
*/
/* Deprecated: associate a property with the current image; forwards to
   SetImageProperty().

   Fix: the original dereferenced wand->images unconditionally, crashing on
   a NULL wand or an empty wand.  Add the same validation every sibling
   wrapper in this file performs (assert the wand, log when debugging,
   raise ContainsNoImages when the wand holds no image). */
WandExport MagickBooleanType MagickSetImageAttribute(MagickWand *wand,
  const char *property,const char *value)
{
  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  if (wand->images == (Image *) NULL)
    ThrowWandException(WandError,"ContainsNoImages",wand->name);
  return(SetImageProperty(wand->images,property,value));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e I n d e x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSetImageIndex() set the current image to the position of the list
% specified with the index parameter.
%
% The format of the MagickSetImageIndex method is:
%
% MagickBooleanType MagickSetImageIndex(MagickWand *wand,
% const ssize_t index)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o index: the scene number.
%
*/
/* Deprecated wrapper: position the wand's image iterator at the given
   scene index via MagickSetIteratorIndex(). */
WandExport MagickBooleanType MagickSetImageIndex(MagickWand *wand,
  const ssize_t index)
{
  MagickBooleanType
    status;

  status=MagickSetIteratorIndex(wand,index);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   M a g i c k S e t I m a g e O p t i o n                                   %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImageOption() associates one or more options with a particular
%  image format (e.g. MagickSetImageOption(wand,"jpeg","preserve","yes")).
%
% The format of the MagickSetImageOption method is:
%
% MagickBooleanType MagickSetImageOption(MagickWand *wand,
% const char *format,const char *key,const char *value)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o format: the image format.
%
% o key: The key.
%
% o value: The value.
%
*/
/* Deprecated: register a format-specific option (e.g. "jpeg:preserve=yes")
   with the wand's image info. */
WandExport MagickBooleanType MagickSetImageOption(MagickWand *wand,
  const char *format,const char *key,const char *value)
{
  char
    image_option[MaxTextExtent];

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  if (wand->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
  /* Compose a "format:key=value" definition and hand it to the option
     registry. */
  (void) FormatLocaleString(image_option,MaxTextExtent,"%s:%s=%s",format,key,
    value);
  return(DefineImageOption(wand->image_info,image_option));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k T r a n s p a r e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickTransparentImage() changes any pixel that matches color with the
% color defined by fill.
%
% The format of the MagickTransparentImage method is:
%
% MagickBooleanType MagickTransparentImage(MagickWand *wand,
% const PixelWand *target,const double alpha,const double fuzz)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o target: Change this target color to specified opacity value within
% the image.
%
% o alpha: the level of transparency: 1.0 is fully opaque and 0.0 is fully
% transparent.
%
% o fuzz: By default target must match a particular pixel color
% exactly. However, in many cases two colors may differ by a small amount.
% The fuzz member of image defines how much tolerance is acceptable to
% consider two colors as the same. For example, set fuzz to 10 and the
% color red at intensities of 100 and 102 respectively are now interpreted
% as the same color for the purposes of the floodfill.
%
*/
/* Deprecated wrapper: forwards to the (also deprecated)
   MagickPaintTransparentImage(). */
WandExport MagickBooleanType MagickTransparentImage(MagickWand *wand,
  const PixelWand *target,const double alpha,const double fuzz)
{
  MagickBooleanType
    status;

  status=MagickPaintTransparentImage(wand,target,alpha,fuzz);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k R e g i o n O f I n t e r e s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickRegionOfInterestImage() extracts a region of the image and returns it
% as a new wand.
%
% The format of the MagickRegionOfInterestImage method is:
%
% MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
% const size_t width,const size_t height,const ssize_t x,
% const ssize_t y)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o width: the region width.
%
% o height: the region height.
%
% o x: the region x offset.
%
% o y: the region y offset.
%
*/
/* Deprecated wrapper: extract a width x height region at (x,y) as a new
   wand via MagickGetImageRegion(). */
WandExport MagickWand *MagickRegionOfInterestImage(MagickWand *wand,
  const size_t width,const size_t height,const ssize_t x,
  const ssize_t y)
{
  MagickWand
    *region_wand;

  region_wand=MagickGetImageRegion(wand,width,height,x,y);
  return(region_wand);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k S e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickSetImagePixels() accepts pixel data and stores it in the image at the
%  location you specify.  The method returns MagickTrue on success otherwise
%  MagickFalse if an error is encountered.  The pixel data can be either char,
%  short int, int, ssize_t, float, or double in the order specified by map.
%
%  Suppose you want to upload the first scanline of a 640x480 image from
% character data in red-green-blue order:
%
% MagickSetImagePixels(wand,0,0,640,1,"RGB",CharPixel,pixels);
%
% The format of the MagickSetImagePixels method is:
%
% MagickBooleanType MagickSetImagePixels(MagickWand *wand,
% const ssize_t x,const ssize_t y,const size_t columns,
% const size_t rows,const char *map,const StorageType storage,
% const void *pixels)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x, y, columns, rows: These values define the perimeter of a region
% of pixels you want to define.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose from
% these types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel,
% or DoublePixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
*/
/* Deprecated wrapper: store pixel data at the given region; forwards to
   MagickImportImagePixels(). */
WandExport MagickBooleanType MagickSetImagePixels(MagickWand *wand,
  const ssize_t x,const ssize_t y,const size_t columns,
  const size_t rows,const char *map,const StorageType storage,
  const void *pixels)
{
  MagickBooleanType
    status;

  status=MagickImportImagePixels(wand,x,y,columns,rows,map,storage,pixels);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k W r i t e I m a g e B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickWriteImageBlob() implements direct to memory image formats. It
% returns the image as a blob and its length. Use MagickSetFormat() to
% set the format of the returned blob (GIF, JPEG, PNG, etc.).
%
% Use MagickRelinquishMemory() to free the blob when you are done with it.
%
% The format of the MagickWriteImageBlob method is:
%
% unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o length: the length of the blob.
%
*/
/* Deprecated wrapper: serialize the image to a memory blob; forwards to
   MagickGetImageBlob().  Caller frees the blob with
   MagickRelinquishMemory(). */
WandExport unsigned char *MagickWriteImageBlob(MagickWand *wand,size_t *length)
{
  unsigned char
    *blob;

  blob=MagickGetImageBlob(wand,length);
  return(blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelView() returns a pixel view required for all other methods in the
% Pixel View API.
%
% The format of the NewPixelView method is:
%
% PixelView *NewPixelView(MagickWand *wand)
%
% A description of each parameter follows:
%
% o wand: the wand.
%
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
const size_t number_threads)
{
PixelWand
***pixel_wands;
register ssize_t
i;
pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
sizeof(*pixel_wands));
if (pixel_wands == (PixelWand ***) NULL)
return((PixelWand ***) NULL);
(void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_wands[i]=NewPixelWands(number_wands);
if (pixel_wands[i] == (PixelWand **) NULL)
return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
}
return(pixel_wands);
}
/* Allocate and initialize a PixelView covering the whole current image of
   WAND.  Aborts with a fatal wand exception on allocation failure.  The
   view owns an exception info, a cache view, and one row of pixel wands
   per OpenMP thread. */
WandExport PixelView *NewPixelView(MagickWand *wand)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  /* NOTE(review): siblings in this file assert against WandSignature;
     confirm MagickSignature is intentional here. */
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  /* Zero the struct; region.x and region.y stay 0 (full-image view). */
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* The wand member must be assigned before the cache view is acquired
     from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=wand->images->columns;
  pixel_view->region.height=wand->images->rows;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  /* One pixel-wand row per thread, each as wide as the view region. */
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w P i x e l V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewPixelViewRegion() returns a pixel view required for all other methods
% in the Pixel View API.
%
% The format of the NewPixelViewRegion method is:
%
% PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixel_wands view.
%
*/
/* Allocate and initialize a PixelView restricted to the width x height
   region at (x,y) of WAND's current image.  Aborts with a fatal wand
   exception on allocation failure.

   Fix: the original acquired the cache view from pixel_view->wand BEFORE
   assigning pixel_view->wand, dereferencing the NULL pointer left behind
   by ResetMagickMemory().  The wand is now assigned first, matching
   NewPixelView(). */
WandExport PixelView *NewPixelViewRegion(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  PixelView
    *pixel_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickSignature);
  pixel_view=(PixelView *) AcquireMagickMemory(sizeof(*pixel_view));
  if (pixel_view == (PixelView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(pixel_view,0,sizeof(*pixel_view));
  pixel_view->id=AcquireWandId();
  (void) FormatLocaleString(pixel_view->name,MaxTextExtent,"%s-%.20g",
    PixelViewId,(double) pixel_view->id);
  pixel_view->exception=AcquireExceptionInfo();
  /* Assign the wand before acquiring the cache view from it. */
  pixel_view->wand=wand;
  pixel_view->view=AcquireCacheView(pixel_view->wand->images);
  pixel_view->region.width=width;
  pixel_view->region.height=height;
  pixel_view->region.x=x;
  pixel_view->region.y=y;
  pixel_view->number_threads=GetOpenMPMaximumThreads();
  /* One pixel-wand row per thread, each as wide as the view region. */
  pixel_view->pixel_wands=AcquirePixelsThreadSet(pixel_view->region.width,
    pixel_view->number_threads);
  if (pixel_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  pixel_view->debug=IsEventLogging();
  pixel_view->signature=WandSignature;
  return(pixel_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l G e t N e x t R o w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelGetNextRow() returns the next row as an array of pixel wands from the
% pixel iterator.
%
% The format of the PixelGetNextRow method is:
%
%      PixelWand **PixelGetNextRow(PixelIterator *iterator)
%
%  A description of each parameter follows:
%
%    o iterator: the pixel iterator.
%
*/
/* Deprecated: return the next scanline of pixel wands from the iterator.
   The row width reported by PixelGetNextIteratorRow() is discarded; use
   that function directly if you need it. */
WandExport PixelWand **PixelGetNextRow(PixelIterator *iterator)
{
  size_t
    row_width;

  return(PixelGetNextIteratorRow(iterator,&row_width));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i x e l I t e r a t o r G e t E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PixelIteratorGetException() returns the severity, reason, and description of
% any error that occurs when using other methods in this API.
%
% The format of the PixelIteratorGetException method is:
%
%      char *PixelIteratorGetException(const PixelIterator *iterator,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o iterator: the pixel iterator.
%
% o severity: the severity of the error is returned here.
%
*/
/* Deprecated wrapper: report the iterator's pending exception via
   PixelGetIteratorException(). */
WandExport char *PixelIteratorGetException(const PixelIterator *iterator,
  ExceptionType *severity)
{
  char
    *description;

  description=PixelGetIteratorException(iterator,severity);
  return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelViewIterator() iterates over the pixel view in parallel and calls
% your set method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initially
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetPixelViewIterator method is:
%
% MagickBooleanType SetPixelViewIterator(PixelView *destination,
% SetPixelViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the pixel view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
/* Run the user SET callback once per scanline of DESTINATION (one thread
   per row under OpenMP) and write the callback-populated pixel wands back
   into the image.  Returns MagickTrue unless any row fails. */
WandExport MagickBooleanType SetPixelViewIterator(PixelView *destination,
  SetPixelViewMethod set,void *context)
{
#define SetPixelViewTag "PixelView/Set"

  ExceptionInfo
    *exception;

  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(destination != (PixelView *) NULL);
  assert(destination->signature == WandSignature);
  if (set == (SetPixelViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Pixels are written directly, so the image must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  /* NOTE(review): y runs from region.y to region.height, so a non-zero
     region.y shortens the sweep rather than offsetting it -- confirm this
     matches the intended region semantics. */
  for (y=destination->region.y; y < (ssize_t) destination->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A previous row already failed; skip the remaining work. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->region.x,
      y,destination->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Let the callback populate this thread's row of pixel wands. */
    if (set(destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the wand colors back into the image scanline. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetIndexPixelComponent(indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_SetPixelViewIterator)
#endif
        proceed=SetImageProgress(destination_image,SetPixelViewTag,progress++,
          destination->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferPixelViewIterator() iterates over two pixel views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% region is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination pixel view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferPixelViewIterator method is:
%
% MagickBooleanType TransferPixelViewIterator(PixelView *source,
% PixelView *destination,TransferPixelViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o destination: the destination pixel view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/* Run the user TRANSFER callback once per scanline, reading from SOURCE
   (virtual pixels, so out-of-bounds reads are permitted) and writing to
   DESTINATION (authentic pixels).  One thread per row under OpenMP.
   Returns MagickTrue unless any row fails.

   Fix: when syncing the destination view fails, the original inherited
   the exception from source->view -- a view that did not fail.  It now
   reports the exception of the destination view that was synced, matching
   SetPixelViewIterator(). */
WandExport MagickBooleanType TransferPixelViewIterator(PixelView *source,
  PixelView *destination,TransferPixelViewMethod transfer,void *context)
{
#define TransferPixelViewTag "PixelView/Transfer"

  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (TransferPixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* The destination is written in place, so it must be DirectClass. */
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    if (status == MagickFalse)
      continue;
    /* Load the source scanline read-only. */
    pixels=GetCacheViewVirtualPixels(source->view,source->region.x,y,
      source->region.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    /* Load the destination scanline for writing. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->region.x,y,destination->region.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    /* Seed the destination wands from the source scanline; assumes the
       destination region is no wider than the source row -- TODO confirm. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    /* User callback transforms source wands into destination wands. */
    if (transfer(source,destination,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the destination wands back into the image scanline. */
    for (x=0; x < (ssize_t) destination->region.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->region.width; x++)
        SetIndexPixelComponent(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        /* Report the exception of the view that failed to sync. */
        InheritException(destination->exception,GetCacheViewException(
          destination->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_TransferPixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,TransferPixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e P i x e l V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdatePixelViewIterator() iterates over the pixel view in parallel and calls
% your update method for each scanline of the view. The pixel region is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% Use this pragma:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdatePixelViewIterator method is:
%
% MagickBooleanType UpdatePixelViewIterator(PixelView *source,
% UpdatePixelViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source pixel view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
/* Run the user UPDATE callback once per scanline of SOURCE (one thread per
   row under OpenMP): load the row into the thread's pixel wands, invoke the
   callback, then write the (possibly modified) wands back and sync.
   Returns MagickTrue unless any row fails. */
WandExport MagickBooleanType UpdatePixelViewIterator(PixelView *source,
  UpdatePixelViewMethod update,void *context)
{
#define UpdatePixelViewTag "PixelView/Update"

  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(source != (PixelView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdatePixelViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,1) shared(progress,status)
#endif
  for (y=source->region.y; y < (ssize_t) source->region.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A previous row already failed; skip the remaining work. */
    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->region.x,y,
      source->region.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Load the scanline into this thread's pixel wands. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetIndexPixelComponent(indexes+x));
    /* Let the callback update the wands in place. */
    if (update(source,context) == MagickFalse)
      status=MagickFalse;
    /* Write the wand colors back into the image scanline. */
    for (x=0; x < (ssize_t) source->region.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->region.width; x++)
        SetIndexPixelComponent(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Serialize progress reporting across worker threads. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickWand_UpdatePixelViewIterator)
#endif
        proceed=SetImageProgress(source_image,UpdatePixelViewTag,progress++,
          source->region.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
#endif
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,ExceptionInfo *exception)
{
  FrameInfo
    frame_info;

  Image
    *canvas,
    *result;

  /*
    Validate arguments.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    Express the border as a bevel-free frame: the framed canvas is the
    image plus the border width/height on each of the four sides.
  */
  frame_info.width=image->columns+2*border_info->width;
  frame_info.height=image->rows+2*border_info->height;
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  /*
    FrameImage() paints with the matte color, so clone the input and
    substitute the border color for the clone's matte color.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  canvas->matte_color=image->border_color;
  result=FrameImage(canvas,&frame_info,exception);
  canvas=DestroyImage(canvas);
  /*
    Restore the caller's matte color on the bordered image.
  */
  if (result != (Image *) NULL)
    result->matte_color=image->matte_color;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* Shading tones derived below from the matte/border colors. */
  MagickPixelPacket
    accentuate,
    border,
    highlight,
    interior,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /*
    NOTE(review): width/height are size_t; if frame_info->x (or ->y) plus
    bevel_width exceeds the frame dimension, these subtractions wrap to a
    huge value and the size check below passes erroneously -- confirm that
    all callers supply consistent geometry.
  */
  width=frame_info->width-frame_info->x-bevel_width;
  height=frame_info->height-frame_info->y-bevel_width;
  if ((width < image->columns) || (height < image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&frame_image->exception);
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  /* A non-gray border color cannot be represented in a gray colorspace. */
  if ((IsPixelGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace);
  /* A translucent border color requires an alpha channel. */
  if ((frame_image->border_color.opacity != OpaqueOpacity) &&
      (frame_image->matte == MagickFalse))
    (void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel);
  frame_image->page=image->page;
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color.
  */
  GetMagickPixelPacket(frame_image,&interior);
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &interior);
  GetMagickPixelPacket(frame_image,&matte);
  matte.colorspace=sRGBColorspace;
  SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
    &matte);
  GetMagickPixelPacket(frame_image,&border);
  border.colorspace=sRGBColorspace;
  SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
    &border);
  /* accentuate/highlight: matte color blended toward full intensity. */
  GetMagickPixelPacket(frame_image,&accentuate);
  accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&highlight);
  highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.opacity=matte.opacity;
  /* shadow/trough: matte color scaled darker. */
  GetMagickPixelPacket(frame_image,&shadow);
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.opacity=matte.opacity;
  GetMagickPixelPacket(frame_image,&trough);
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.opacity=matte.opacity;
  if (image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&interior);
      ConvertRGBToCMYK(&matte);
      ConvertRGBToCMYK(&border);
      ConvertRGBToCMYK(&accentuate);
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&shadow);
      ConvertRGBToCMYK(&trough);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /* Total scanlines in the top border strip (outer bevel + flat top +
     inner bevel). */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register IndexPacket
        *restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw top of ornamental border.
          */
          /* Outer bevel rows: highlight on the left slope, accentuate
             across the top, shadow on the right slope. */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Flat top rows: highlight edge, matte fill, shadow edge. */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Inner bevel rows above the image interior. */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,frame_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict frame_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    /*
      Set frame interior to interior color.
    */
    /* When the compose op requires a real composite (anything other than
       a plain copy of an opaque image), the interior is filled with the
       border color here and CompositeImage() is invoked after the loops;
       otherwise the source pixels are copied directly into the frame. */
    if ((image->compose != CopyCompositeOp) &&
        ((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelPacket(frame_image,&interior,q,frame_indexes);
        q++;
        frame_indexes++;
      }
    else
      {
        register const IndexPacket
          *indexes;

        register const PixelPacket
          *p;

        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        (void) CopyMagickMemory(q,p,image->columns*sizeof(*p));
        if ((image->colorspace == CMYKColorspace) &&
            (frame_image->colorspace == CMYKColorspace))
          {
            (void) CopyMagickMemory(frame_indexes,indexes,image->columns*
              sizeof(*indexes));
            frame_indexes+=image->columns;
          }
        q+=image->columns;
      }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelPacket(frame_image,&highlight,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    /* NOTE(review): `width` is a function-scope variable written by every
       iteration of this OpenMP parallel loop.  All threads store the same
       value, but it is still a technical data race -- consider hoisting
       this assignment above the loop. */
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelPacket(frame_image,&matte,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelPacket(frame_image,&shadow,q,frame_indexes);
      q++;
      frame_indexes++;
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* Total scanlines in the bottom border strip. */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register IndexPacket
        *restrict frame_indexes;

      register ssize_t
        x;

      register PixelPacket
        *restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (PixelPacket *) NULL)
        {
          /*
            Draw bottom of ornamental border.
          */
          frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
          /* Inner bevel rows below the image interior (drawn bottom-up
             via the decreasing y). */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Flat bottom rows: highlight edge, matte fill, shadow edge. */
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelPacket(frame_image,&matte,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          /* Outer bevel rows at the very bottom of the frame. */
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelPacket(frame_image,&highlight,q,frame_indexes);
              q++;
              frame_indexes++;
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelPacket(frame_image,&shadow,q,frame_indexes);
              else
                SetPixelPacket(frame_image,&trough,q,frame_indexes);
              q++;
              frame_indexes++;
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /* Composite the source image into the interior for non-trivial compose
     operators (same condition as the interior fill above). */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse)))
    {
      x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
        frame_info->inner_bevel);
      y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
        frame_info->inner_bevel);
      (void) CompositeImage(frame_image,image->compose,image,x,y);
    }
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* foreground lightens the top/left edges, background darkens the
     bottom/right edges; they are swapped when raise is false so the
     effect is lowered instead of raised. */
  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /* The bevel on each side must leave at least one interior pixel. */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    Top band (rows 0 .. height-1): highlight the left slope, accentuate
    the top edge, shadow the right slope.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Middle band: highlight the left bevel column, leave the interior
    untouched, shadow the right bevel column.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    /* Interior pixels pass through unchanged. */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Bottom band: highlight the left slope, trough the bottom edge, shadow
    the right slope.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__minus_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_int8)
// A*D function (colscale): GB (_AxD__minus_int8)
// D*A function (rowscale): GB (_DxB__minus_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__minus_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__minus_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_int8)
// C=scalar+B GB (_bind1st__minus_int8)
// C=scalar+B' GB (_bind1st_tran__minus_int8)
// C=A+scalar GB (_bind2nd__minus_int8)
// C=A'+scalar GB (_bind2nd_tran__minus_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (stray trailing backslash removed: it spliced the following comment
// line into the macro definition)
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (stray trailing backslash removed here as well)
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: z = x - y (the row/column indices i,j are unused)
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT8 || GxB_NO_MINUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the accumulator and the ewise
// operator are both this file's MINUS int8 operator.  The loop itself
// lives in the included template, specialized by the GB_* macros above.
void GB (_Cdense_ewise3_accum__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads    // number of OpenMP threads for the template
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense: cij = aij - bij, with no
// accumulator.  The loop lives in the included template, specialized by
// the GB_* macros above.
void GB (_Cdense_ewise3_noaccum__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads    // number of OpenMP threads for the template
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the
// MINUS int8 operator.  B has been pre-sliced into B_ntasks tasks
// (B_ek_slicing) to be run on B_nthreads threads.
GrB_Info GB (_Cdense_accumB__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller uses generic code
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the MINUS int8
// operator (cij = cij - b).  p_bwork points at the scalar, passed as an
// untyped GB_void* and unpacked here as int8_t.
//
// Fix: the template block previously contained its own return statement,
// which made the trailing `return (GrB_SUCCESS)` unreachable and diverged
// from the sibling _Cdense_accumB wrapper; the duplicate inner return is
// removed so there is a single exit point after the template.
GrB_Info GB (_Cdense_accumb__minus_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads    // number of OpenMP threads for the template
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller uses generic code
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, applying
// z = x - y entrywise.  A has been pre-sliced (A_ek_slicing) into
// A_ntasks tasks for A_nthreads threads.
GrB_Info GB (_AxD__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller uses generic code
    return (GrB_NO_VALUE) ;
    #else
    // Cx: the value array of C, filled in by the colscale template
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying
// z = x - y entrywise, using nthreads OpenMP threads.
GrB_Info GB (_DxB__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller uses generic code
    return (GrB_NO_VALUE) ;
    #else
    // Cx: the value array of C, filled in by the rowscale template
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, applying z = x - y where
// both entries are present.  When is_eWiseUnion is true, alpha_scalar and
// beta_scalar substitute for entries missing from A and B respectively.
// The slicing/task arguments (C_to_*, TaskList, C_ntasks, C_nthreads)
// drive the parallel schedule inside the add template.
GrB_Info GB (_AaddB__minus_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller uses generic code
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; released by GB_FREE_WORKSPACE
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the scalars are only defined (and only read) for eWiseUnion
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the
// result C is sparse or hypersparse; z = x - y is applied to entries in
// the intersection pattern of A and B.
GrB_Info GB (_AemultB_08__minus_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator was disabled at compile time; caller uses generic code
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Specialized eWiseMult kernel (method 02) for sparse/hyper A against
// bitmap/full B, with the int8 MINUS operator.  MINUS is not commutative, so
// when GB_BINOP_FLIP is set the flipxy flag chooses between f(x,y) and f(y,x)
// by selecting a differently-specialized instantiation of the same template.
GrB_Info GB (_AemultB_02__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,          // if true, apply the operator as f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Specialized eWiseMult kernel (method 04): masked by sparse/hyper M with
// both inputs bitmap/full, int8 MINUS operator.  Work is parallelized over
// the precomputed M_ek_slicing task decomposition inside the template.
GrB_Info GB (_AemultB_04__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// eWiseMult producing a bitmap C, with the int8 MINUS operator and optional
// (possibly complemented) mask.  Loop bodies live in the included template.
GrB_Info GB (_AemultB_bitmap__minus_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For every entry present in B, compute Cx [p] = x - Bx [p] (int8 MINUS with
// the scalar bound to the first argument).  Cx and Bx may alias.
GrB_Info GB (_bind1st__minus_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b bitmap, consulted via GBB
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *out   = (int8_t *) Cx_output ;
    int8_t *bvals = (int8_t *) Bx_input ;
    int8_t xval   = (*((int8_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // only positions present per the bitmap are written
        if (GBB (Bb, p))
        {
            int8_t bij = GBX (bvals, p, false) ;
            out [p] = (xval - bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// For every entry present in A, compute Cx [p] = Ax [p] - y (int8 MINUS with
// the scalar bound to the second argument).  Cx and Ax may alias.
GrB_Info GB (_bind2nd__minus_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b bitmap, consulted via GBB
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *out   = (int8_t *) Cx_output ;
    int8_t *avals = (int8_t *) Ax_input ;
    int8_t yval   = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // only positions present per the bitmap are written
        if (GBB (Ab, p))
        {
            int8_t aij = GBX (avals, p, false) ;
            out [p] = (aij - yval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x - aij) ;                       \
}

// Computes C = x - A' (transpose A while applying int8 MINUS with the scalar
// bound first).  The transpose machinery is in the included template.
GrB_Info GB (_bind1st_tran__minus_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE for any code that follows (generated pattern)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij - y) ;                       \
}

// Computes C = A' - y (transpose A while applying int8 MINUS with the scalar
// bound second).  The transpose machinery is in the included template.
GrB_Info GB (_bind2nd_tran__minus_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "wino_conv_kernel_x86.h"
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Clamp activations in place: values below zero become zero (ReLU); when
 * 'activation' is positive it additionally acts as an upper bound (ReLU-N,
 * e.g. activation==6 gives ReLU6). */
static void relu(float* data, int size, int activation)
{
    const float upper = (float)activation;
    for (int i = 0; i < size; ++i)
    {
        float v = data[i];
        /* same comparison shape as the original min/max macros, so NaN
         * handling is unchanged */
        v = (v > 0.f) ? v : 0.f;
        if (activation > 0)
        {
            v = (v < upper) ? v : upper;
        }
        data[i] = v;
    }
}
/* Size in bytes of the scratch buffer needed for the transformed kernel:
 * outch * inch * ELEM_SIZE floats, plus a small safety margin.
 * 'param' is unused but kept for the common get-mem-size signature. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    (void)param;
    const int out_ch = filter->dims[0];
    const int in_ch = filter->dims[1];
    size_t ker_bytes = (size_t)out_ch * in_ch * ELEM_SIZE * sizeof(float);
    return (int)ker_bytes + 128; // caution
}
/* Copy an m x n tile into an m_align x n_align buffer, placing the tile at
 * row offset pad_h / column offset pad_w.  The destination is assumed to be
 * pre-zeroed; this routine writes only the tile rows, not the padding. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    /* Fast path: destination is not larger than the source tile. */
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        float* d = dst + (row + pad_h) * n_align + pad_w;
        const float* s = src + row * n;
        memcpy(d, s, n * sizeof(float));
    }
}
// pad 0 in right and down side on 3D: applies pad_0_align_2D independently
// to each of the c channel planes.
void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    /* Fast path: per-plane geometry already matches, bulk-copy all planes. */
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        float* dplane = dst + ch * m_align * n_align;
        float* splane = src + ch * m * n;
        pad_0_align_2D(dplane, splane, m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D: extract the m x n region that starts at
 * (pad_h, pad_w) inside an m_align x n_align padded buffer. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    /* Fast path: nothing was padded, copy straight through. */
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        const float* s = src + (row + pad_h) * n_align + pad_w;
        memcpy(dst + row * n, s, n * sizeof(float));
    }
}
// Inverse of pad_0_align_3D: strips the right/bottom padding from each of
// the c channel planes via delete_0_2D.
void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    /* Fast path: no padding present, bulk-copy all planes. */
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        float* dplane = dst + ch * m * n;
        float* splane = src + ch * m_align * n_align;
        delete_0_2D(dplane, splane, m_align, n_align, m, n, pad_h, pad_w);
    }
}
/* Winograd F(4,3) convolution for 3x3 stride-1 kernels.
 *
 * Pipeline: (1) transform the (pre-padded) input into the 6x6 Winograd
 * domain, (2) per-tile dot products against the pre-transformed kernel
 * (kernel_tm_test), (3) inverse-transform tiles into the output, adding the
 * bias, and (4) strip the 4-alignment padding when outw/outh are not
 * multiples of 4.
 *
 * bottom_blob must already be padded to inch x (outh_align+2) x (outw_align+2);
 * the incoming w/h are overwritten accordingly.  transform_input, dot_block
 * and output_bordered are caller-provided scratch buffers.  _bias may be NULL.
 *
 * FIX: the final un-padding condition compared outh_align against outw
 * (typo); it now correctly compares against outh, so non-square outputs are
 * un-padded exactly when padding was applied.
 */
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block,
                              float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch,
                              int outw, int outh, int outch, int num_thread)
{
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    float* bottom_blob_bordered = bottom_blob;
    int outw_align = (outw + 3) / 4 * 4;
    int outh_align = (outh + 3) / 4 * 4;

    w = outw_align + 2;
    h = outh_align + 2;

    // BEGIN transform input
    float* bottom_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6;    // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        const int tiles_n = 4 * inch * tiles;

        bottom_blob_tm = transform_input;

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =	4 * r00  - 5 * r02	+ r04
        // 1 = -4 * (r01 + r02)  + r03 + r04
        // 2 =	4 * (r01 - r02)  - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =	2 * r01 - r02 - 2 * r03 + r04
        // 5 =	4 * r01 - 5 * r03 + r05

#if __AVX__
        __m256 _1_n = _mm256_set1_ps(-1);
        __m256 _2_p = _mm256_set1_ps(2);
        __m256 _2_n = _mm256_set1_ps(-2);
        __m256 _4_p = _mm256_set1_ps(4);
        __m256 _4_n = _mm256_set1_ps(-4);
        __m256 _5_n = _mm256_set1_ps(-5);
#endif

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered + q * w * h;

            for (int j = 0; j < nColBlocks; j++)
            {
                const float* r0 = img + w * j * 4;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                const float* r4 = r3 + w;
                const float* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q;
                    float* out_tm1 = out_tm0 + tiles_n;
                    float* out_tm2 = out_tm0 + 2 * tiles_n;
                    float* out_tm3 = out_tm0 + 3 * tiles_n;
                    float* out_tm4 = out_tm0 + 4 * tiles_n;
                    float* out_tm5 = out_tm0 + 5 * tiles_n;
                    float* out_tm6 = out_tm0 + 6 * tiles_n;
                    float* out_tm7 = out_tm0 + 7 * tiles_n;
                    float* out_tm8 = out_tm0 + 8 * tiles_n;
#if __AVX__
                    __m256 _d0, _d1, _d2, _d3, _d4, _d5;
                    __m256 _w0, _w1, _w2, _w3, _w4, _w5;
                    __m256 _t0, _t1, _t2, _t3, _t4, _t5;
                    __m256 _n0, _n1, _n2, _n3, _n4, _n5;
                    // load
                    _d0 = _mm256_loadu_ps(r0);
                    _d1 = _mm256_loadu_ps(r1);
                    _d2 = _mm256_loadu_ps(r2);
                    _d3 = _mm256_loadu_ps(r3);
                    _d4 = _mm256_loadu_ps(r4);
                    _d5 = _mm256_loadu_ps(r5);

                    // w = B_t * d
                    _w0 = _mm256_mul_ps(_d0, _4_p);
                    _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
                    _w0 = _mm256_add_ps(_w0, _d4);
                    _w1 = _mm256_mul_ps(_d1, _4_n);
                    _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
                    _w1 = _mm256_add_ps(_w1, _d3);
                    _w1 = _mm256_add_ps(_w1, _d4);
                    _w2 = _mm256_mul_ps(_d1, _4_p);
                    _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
                    _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
                    _w2 = _mm256_add_ps(_w2, _d4);
                    _w3 = _mm256_mul_ps(_d1, _2_n);
                    _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
                    _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
                    _w3 = _mm256_add_ps(_w3, _d4);
                    _w4 = _mm256_mul_ps(_d1, _2_p);
                    _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
                    _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
                    _w4 = _mm256_add_ps(_w4, _d4);
                    _w5 = _mm256_mul_ps(_d1, _4_p);
                    _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
                    _w5 = _mm256_add_ps(_w5, _d5);
                    // transpose d to d_t
#ifdef _WIN32
                    {
                        _t0.m256_f32[0] = _w0.m256_f32[0];
                        _t1.m256_f32[0] = _w0.m256_f32[1];
                        _t2.m256_f32[0] = _w0.m256_f32[2];
                        _t3.m256_f32[0] = _w0.m256_f32[3];
                        _t4.m256_f32[0] = _w0.m256_f32[4];
                        _t5.m256_f32[0] = _w0.m256_f32[5];
                        _t0.m256_f32[1] = _w1.m256_f32[0];
                        _t1.m256_f32[1] = _w1.m256_f32[1];
                        _t2.m256_f32[1] = _w1.m256_f32[2];
                        _t3.m256_f32[1] = _w1.m256_f32[3];
                        _t4.m256_f32[1] = _w1.m256_f32[4];
                        _t5.m256_f32[1] = _w1.m256_f32[5];
                        _t0.m256_f32[2] = _w2.m256_f32[0];
                        _t1.m256_f32[2] = _w2.m256_f32[1];
                        _t2.m256_f32[2] = _w2.m256_f32[2];
                        _t3.m256_f32[2] = _w2.m256_f32[3];
                        _t4.m256_f32[2] = _w2.m256_f32[4];
                        _t5.m256_f32[2] = _w2.m256_f32[5];
                        _t0.m256_f32[3] = _w3.m256_f32[0];
                        _t1.m256_f32[3] = _w3.m256_f32[1];
                        _t2.m256_f32[3] = _w3.m256_f32[2];
                        _t3.m256_f32[3] = _w3.m256_f32[3];
                        _t4.m256_f32[3] = _w3.m256_f32[4];
                        _t5.m256_f32[3] = _w3.m256_f32[5];
                        _t0.m256_f32[4] = _w4.m256_f32[0];
                        _t1.m256_f32[4] = _w4.m256_f32[1];
                        _t2.m256_f32[4] = _w4.m256_f32[2];
                        _t3.m256_f32[4] = _w4.m256_f32[3];
                        _t4.m256_f32[4] = _w4.m256_f32[4];
                        _t5.m256_f32[4] = _w4.m256_f32[5];
                        _t0.m256_f32[5] = _w5.m256_f32[0];
                        _t1.m256_f32[5] = _w5.m256_f32[1];
                        _t2.m256_f32[5] = _w5.m256_f32[2];
                        _t3.m256_f32[5] = _w5.m256_f32[3];
                        _t4.m256_f32[5] = _w5.m256_f32[4];
                        _t5.m256_f32[5] = _w5.m256_f32[5];
                    }
#else
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }
#endif
                    // d = B_t * d_t
                    _n0 = _mm256_mul_ps(_t0, _4_p);
                    _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
                    _n0 = _mm256_add_ps(_n0, _t4);
                    _n1 = _mm256_mul_ps(_t1, _4_n);
                    _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
                    _n1 = _mm256_add_ps(_n1, _t3);
                    _n1 = _mm256_add_ps(_n1, _t4);
                    _n2 = _mm256_mul_ps(_t1, _4_p);
                    _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
                    _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
                    _n2 = _mm256_add_ps(_n2, _t4);
                    _n3 = _mm256_mul_ps(_t1, _2_n);
                    _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
                    _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
                    _n3 = _mm256_add_ps(_n3, _t4);
                    _n4 = _mm256_mul_ps(_t1, _2_p);
                    _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
                    _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
                    _n4 = _mm256_add_ps(_n4, _t4);
                    _n5 = _mm256_mul_ps(_t1, _4_p);
                    _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
                    _n5 = _mm256_add_ps(_n5, _t5);
                    // save to out_tm
                    float output_n0[8] = {0.f};
                    _mm256_storeu_ps(output_n0, _n0);
                    float output_n1[8] = {0.f};
                    _mm256_storeu_ps(output_n1, _n1);
                    float output_n2[8] = {0.f};
                    _mm256_storeu_ps(output_n2, _n2);
                    float output_n3[8] = {0.f};
                    _mm256_storeu_ps(output_n3, _n3);
                    float output_n4[8] = {0.f};
                    _mm256_storeu_ps(output_n4, _n4);
                    float output_n5[8] = {0.f};
                    _mm256_storeu_ps(output_n5, _n5);

                    out_tm0[0] = output_n0[0];
                    out_tm0[1] = output_n0[1];
                    out_tm0[2] = output_n0[2];
                    out_tm0[3] = output_n0[3];
                    out_tm1[0] = output_n0[4];
                    out_tm1[1] = output_n0[5];
                    out_tm1[2] = output_n1[0];
                    out_tm1[3] = output_n1[1];

                    out_tm2[0] = output_n1[2];
                    out_tm2[1] = output_n1[3];
                    out_tm2[2] = output_n1[4];
                    out_tm2[3] = output_n1[5];
                    out_tm3[0] = output_n2[0];
                    out_tm3[1] = output_n2[1];
                    out_tm3[2] = output_n2[2];
                    out_tm3[3] = output_n2[3];

                    out_tm4[0] = output_n2[4];
                    out_tm4[1] = output_n2[5];
                    out_tm4[2] = output_n3[0];
                    out_tm4[3] = output_n3[1];
                    out_tm5[0] = output_n3[2];
                    out_tm5[1] = output_n3[3];
                    out_tm5[2] = output_n3[4];
                    out_tm5[3] = output_n3[5];

                    out_tm6[0] = output_n4[0];
                    out_tm6[1] = output_n4[1];
                    out_tm6[2] = output_n4[2];
                    out_tm6[3] = output_n4[3];
                    out_tm7[0] = output_n4[4];
                    out_tm7[1] = output_n4[5];
                    out_tm7[2] = output_n5[0];
                    out_tm7[3] = output_n5[1];

                    out_tm8[0] = output_n5[2];
                    out_tm8[1] = output_n5[3];
                    out_tm8[2] = output_n5[4];
                    out_tm8[3] = output_n5[5];
#else
                    float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];

                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];

                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];

                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];

                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif    // __AVX__
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    // BEGIN dot
    float* top_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6;    // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        const int tiles_n = 36 * tiles;

        top_blob_tm = dot_block;
#pragma omp parallel for num_threads(num_thread)
        for (int r = 0; r < 9; r++)
        {
            // process output channels in groups of 8, then 4, then singly
            int nn_outch = outch >> 3;
            int remain_outch_start = nn_outch << 3;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp << 3;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);
                float* output4_tm = top_blob_tm + tiles_n * (p + 4);
                float* output5_tm = top_blob_tm + tiles_n * (p + 5);
                float* output6_tm = top_blob_tm + tiles_n * (p + 6);
                float* output7_tm = top_blob_tm + tiles_n * (p + 7);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum4 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum5 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum6 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
                    __m128 _sum4 = _mm_set1_ps(0.f);
                    __m128 _sum5 = _mm_set1_ps(0.f);
                    __m128 _sum6 = _mm_set1_ps(0.f);
                    __m128 _sum7 = _mm_set1_ps(0.f);
#endif
                    int q = 0;
                    // unrolled over 4 input channels per iteration
                    for (; q + 3 < inch; q = q + 4)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _r1 = _mm_loadu_ps(r0 + 4);
                        __m128 _r2 = _mm_loadu_ps(r0 + 8);
                        __m128 _r3 = _mm_loadu_ps(r0 + 12);

                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
                        kptr += 32;
                        r0 += 16;
                    }

                    for (; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
                    _mm_storeu_ps(output4_tm, _sum4);
                    _mm_storeu_ps(output5_tm, _sum5);
                    _mm_storeu_ps(output6_tm, _sum6);
                    _mm_storeu_ps(output7_tm, _sum7);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    float sum4[4] = {0};
                    float sum5[4] = {0};
                    float sum6[4] = {0};
                    float sum7[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                            sum4[n] += r0[n] * kptr[n + 16];
                            sum5[n] += r0[n] * kptr[n + 20];
                            sum6[n] += r0[n] * kptr[n + 24];
                            sum7[n] += r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif    // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
                        kptr += 16;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif    // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            for (int p = remain_outch_start; p < outch; p++)
            {
                float* output0_tm = top_blob_tm + 36 * tiles * p;

                output0_tm = output0_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr =
                        kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
                        kptr += 4;
                        r0 += 4;
                    }
                    _mm_storeu_ps(output0_tm, _sum0);
#else
                    float sum0[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif    // __AVX__ || __SSE__
                    output0_tm += 36;
                }
            }
        }
    }
    // END dot

    // BEGIN transform output
    float* top_blob_bordered = NULL;
    if (outw_align == outw && outh_align == outh)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered = output_bordered;
    }
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6;    // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

#pragma omp parallel for num_threads(num_thread)
        for (int p = 0; p < outch; p++)
        {
            float* out_tile = top_blob_tm + 36 * tiles * p;
            float* outRow0 = top_blob_bordered + outw_align * outh_align * p;
            float* outRow1 = outRow0 + outw_align;
            float* outRow2 = outRow0 + outw_align * 2;
            float* outRow3 = outRow0 + outw_align * 3;

            const float bias0 = bias ? bias[p] : 0.f;

            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // TODO AVX2
                    float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    float w0[6], w1[6], w2[6], w3[6];
                    float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    float o0[4], o1[4], o2[4], o3[4];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
                        d4[3] = w3[4];
                        d5[0] = w0[5];
                        d5[1] = w1[5];
                        d5[2] = w2[5];
                        d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save to top blob tm
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] + bias0;
                        outRow1[n] = o1[n] + bias0;
                        outRow2[n] = o2[n] + bias0;
                        outRow3[n] = o3[n] + bias0;
                    }

                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }

                outRow0 += outw_align * 3;
                outRow1 += outw_align * 3;
                outRow2 += outw_align * 3;
                outRow3 += outw_align * 3;
            }
        }
    }
    // END transform output

    // Strip the 4-alignment padding when the true output is smaller.
    // (was: outh_align != outw — a typo that broke non-square outputs)
    if (outw_align != outw || outh_align != outh)
    {
        delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0);
    }
}
/* Transform 3x3 convolution kernels into the Winograd F(4x4,3x3) domain and
 * repack them into the interleaved layout consumed by the SSE dot kernels.
 *
 * kernel      : input weights, layout [outch][inch][3][3]
 * kernel_wino : output buffer, 36 * inch * outch floats, interleaved in
 *               blocks of 8/4/1 output channels per 4-element chunk
 * inch, outch : channel counts
 *
 * Fix: the scratch buffer is allocated with sys_malloc() but was released
 * with plain free(); pair it with sys_free() for consistency with the rest
 * of this file (see wino_conv_hcl_postrun).
 */
void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch)
{
    /* scratch: one 6x6 transformed kernel per (outch, inch) pair */
    float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float));

    /* G matrix of the Winograd F(4,3) kernel transform: U = G * g * G^T */
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}};

#pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36;

            /* rows of the 3x3 kernel */
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            /* tmp = G * g  (6x3) */
            float tmp[6][3] = {0};
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            /* U = tmp * G^T  (6x6) */
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    /* Interleave: the 36 coefficients are split into 9 chunks of 4 (indexed
     * by r); output channels are grouped 8-wide, then 4-wide, then singly,
     * matching the unrolled loops of the dot-product stage. */
    float* kernel_tm_test = kernel_wino;
    for (int r = 0; r < 9; r++)
    {
        int p = 0;
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36;
            const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36;
            const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36;
            const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36;
            float* ktmp = kernel_tm_test + p / 8 * inch * 32;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];
                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];
                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];
                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];
                ktmp += 32;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            /* offset past the 8-wide groups, then by 4-wide group index */
            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        for (; p < outch; p++)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp += 4;
                kernel0 += 36;
            }
        }
        /* advance to the next 4-coefficient chunk of the output buffer */
        kernel_tm_test += 4 * inch * outch;
    }

    /* was free(kernel_tm): match the sys_malloc above with sys_free */
    sys_free(kernel_tm);
}
/* Allocate the scratch buffers used by the Winograd F(4,3) path and
 * pre-transform the convolution weights.
 *
 * Returns 0 on success.  Buffers are released in wino_conv_hcl_postrun.
 * NOTE(review): calling prerun twice without an intervening postrun leaks
 * the previously allocated buffers -- confirm caller contract.
 *
 * Fix: removed unused locals (input_h/input_w/pad_h/pad_w) that were read
 * from the tensors/params but never used.
 */
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
                         struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int batch = input_tensor->dims[0];
    int input_c = input_tensor->dims[1];
    int output_c = output_tensor->dims[1];
    int output_h = output_tensor->dims[2];
    int output_w = output_tensor->dims[3];

    float* kernel = ( float* )filter_tensor->data;

    /* interleave buffer for the transformed kernels (owned here unless the
     * caller supplied external memory) */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* tile geometry: output is processed in TILE x TILE blocks; the padded
     * input needs a 2-pixel border for the 6x6 input transform */
    int block_h = (output_h + TILE - 1) / TILE;
    int block_w = (output_w + TILE - 1) / TILE;
    int block = block_h * block_w;

    int padded_inh = TILE * block_h + 2;
    int padded_inw = TILE * block_w + 2;
    int pad_inhw = padded_inh * padded_inw;

    int outw = block_w * TILE;
    int outh = block_h * TILE;

    priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float));
    memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float));
    priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float));
    priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float));
    priv_info->output_bordered = NULL;
    /* only needed when the tile-aligned output is larger than the real one */
    if (outw != output_w || outh != output_h)
    {
        priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float));
    }

    conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c);
    return 0;
}
/* Release every buffer allocated by wino_conv_hcl_prerun and reset the
 * owning pointers so a repeated postrun is harmless.  The interleave buffer
 * is only freed when this module owns it.  Always returns 0. */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (priv_info->output_bordered != NULL)
    {
        sys_free(priv_info->output_bordered);
        priv_info->output_bordered = NULL;
    }

    if (priv_info->transform_input != NULL)
    {
        sys_free(priv_info->transform_input);
        priv_info->transform_input = NULL;
    }

    if (priv_info->dot_block != NULL)
    {
        sys_free(priv_info->dot_block);
        priv_info->dot_block = NULL;
    }

    if (priv_info->input_pad != NULL)
    {
        sys_free(priv_info->input_pad);
        priv_info->input_pad = NULL;
    }

    return 0;
}
/* Execute a 3x3 stride-1 convolution through the Winograd F(4,3) kernels,
 * one batch image at a time, then apply the optional activation.
 *
 * Improvements over the original:
 *  - removed ~10 locals that were computed but never used (kernel/stride/
 *    dilation sizes, out_c_align, etc.);
 *  - hoisted pad_0_align_3D out of the group loop: its arguments do not
 *    depend on g, so it was re-padding identical data once per group.
 * Returns 0 on success.
 */
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                      struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* param (cpu_affinity is accepted for interface parity but unused here) */
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;
    int group = param->group;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_c_g = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size_g = in_c_g * in_h * in_w;

    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* wino tile geometry (must match wino_conv_hcl_prerun) */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;

    /* buffer addr */
    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;
    float* biases = NULL;
    if (bias_tensor != NULL)
        biases = ( float* )bias_tensor->data;

    for (int i = 0; i < batch; i++)
    {
        /* pad once per image: the call is independent of the group index */
        pad_0_align_3D(priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w,
                       in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0);
        for (int g = 0; g < group; g++)
        {
            /* NOTE(review): for group > 1 the output pointer carries no
             * per-group offset and in_c (not in_c_g) is passed as the channel
             * count; this path looks correct only for group == 1 -- confirm. */
            conv3x3s1_winograd43_sse(priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g,
                                     output + i * out_c * out_h * out_w, priv_info->interleave_buffer,
                                     priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered,
                                     biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread);
        }
    }

    if (act_type >= 0)
    {
        relu(output, batch * output_size, act_type);
    }
    return 0;
}
/******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_SStructPMatrix class.
*
*****************************************************************************/
#include "_hypre_sstruct_mv.h"
/*==========================================================================
* SStructPMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixRef( hypre_SStructPMatrix  *matrix,
                         hypre_SStructPMatrix **matrix_ref )
{
   /* Hand back an aliased reference and record the extra owner; the matrix
      is only destroyed once every reference has been released. */
   *matrix_ref = matrix;
   hypre_SStructPMatrixRefCount(matrix) += 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixCreate( MPI_Comm               comm,
                            hypre_SStructPGrid    *pgrid,
                            hypre_SStructStencil **stencils,
                            hypre_SStructPMatrix **pmatrix_ptr )
{
   /* Create a part matrix: the coupled sstruct stencil of each variable vi
    * is split into one struct stencil per to-variable vj, and a struct
    * matrix is created for every (vi, vj) coupling that actually occurs.
    * smaps[vi][i] records where sstencil entry i landed inside its per-vj
    * struct stencil. */
   hypre_SStructPMatrix  *pmatrix;
   HYPRE_Int              nvars;
   HYPRE_Int            **smaps;
   hypre_StructStencil ***sstencils;
   hypre_StructMatrix  ***smatrices;
   HYPRE_Int            **symmetric;

   hypre_StructStencil   *sstencil;
   HYPRE_Int             *vars;
   hypre_Index           *sstencil_shape;
   HYPRE_Int              sstencil_size;
   HYPRE_Int              new_dim;
   HYPRE_Int             *new_sizes;
   hypre_Index          **new_shapes;
   HYPRE_Int              size;
   hypre_StructGrid      *sgrid;
   HYPRE_Int              vi, vj;
   HYPRE_Int              i, j, k;

   pmatrix = hypre_TAlloc(hypre_SStructPMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_SStructPMatrixComm(pmatrix)     = comm;
   hypre_SStructPMatrixPGrid(pmatrix)    = pgrid;
   hypre_SStructPMatrixStencils(pmatrix) = stencils;
   nvars = hypre_SStructPGridNVars(pgrid);
   hypre_SStructPMatrixNVars(pmatrix) = nvars;

   /* create sstencils */
   smaps      = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST);
   sstencils  = hypre_TAlloc(hypre_StructStencil **, nvars, HYPRE_MEMORY_HOST);
   new_sizes  = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST);
   new_shapes = hypre_TAlloc(hypre_Index *, nvars, HYPRE_MEMORY_HOST);
   size = 0;
   for (vi = 0; vi < nvars; vi++)
   {
      sstencils[vi] = hypre_TAlloc(hypre_StructStencil *, nvars, HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         sstencils[vi][vj] = NULL;
         new_sizes[vj] = 0;
      }

      sstencil       = hypre_SStructStencilSStencil(stencils[vi]);
      vars           = hypre_SStructStencilVars(stencils[vi]);
      sstencil_shape = hypre_StructStencilShape(sstencil);
      sstencil_size  = hypre_StructStencilSize(sstencil);

      smaps[vi] = hypre_TAlloc(HYPRE_Int, sstencil_size, HYPRE_MEMORY_HOST);

      /* first pass: count how many entries couple vi to each vj */
      for (i = 0; i < sstencil_size; i++)
      {
         j = vars[i];
         new_sizes[j]++;
      }
      for (vj = 0; vj < nvars; vj++)
      {
         if (new_sizes[vj])
         {
            new_shapes[vj] = hypre_TAlloc(hypre_Index, new_sizes[vj], HYPRE_MEMORY_HOST);
            /* reset so the second pass can reuse new_sizes as a cursor */
            new_sizes[vj] = 0;
         }
      }

      /* second pass: scatter shapes into the per-vj arrays and remember the
       * entry mapping in smaps */
      for (i = 0; i < sstencil_size; i++)
      {
         j = vars[i];
         k = new_sizes[j];
         hypre_CopyIndex(sstencil_shape[i], new_shapes[j][k]);
         smaps[vi][i] = k;
         new_sizes[j]++;
      }

      new_dim = hypre_StructStencilNDim(sstencil);
      for (vj = 0; vj < nvars; vj++)
      {
         if (new_sizes[vj])
         {
            /* NOTE(review): new_shapes[vj] appears to be handed over to the
             * struct stencil (it is not freed here) -- confirm ownership. */
            sstencils[vi][vj] =
               hypre_StructStencilCreate(new_dim, new_sizes[vj], new_shapes[vj]);
         }
         /* size = widest per-vj stencil; used to size the sentries scratch */
         size = hypre_max(size, new_sizes[vj]);
      }
   }
   hypre_SStructPMatrixSMaps(pmatrix)     = smaps;
   hypre_SStructPMatrixSStencils(pmatrix) = sstencils;
   hypre_TFree(new_sizes, HYPRE_MEMORY_HOST);
   hypre_TFree(new_shapes, HYPRE_MEMORY_HOST);

   /* create smatrices: one struct matrix per non-empty (vi, vj) coupling */
   smatrices = hypre_TAlloc(hypre_StructMatrix **, nvars, HYPRE_MEMORY_HOST);
   for (vi = 0; vi < nvars; vi++)
   {
      smatrices[vi] = hypre_TAlloc(hypre_StructMatrix *, nvars, HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         smatrices[vi][vj] = NULL;
         if (sstencils[vi][vj] != NULL)
         {
            sgrid = hypre_SStructPGridSGrid(pgrid, vi);
            smatrices[vi][vj] =
               hypre_StructMatrixCreate(comm, sgrid, sstencils[vi][vj]);
         }
      }
   }
   hypre_SStructPMatrixSMatrices(pmatrix) = smatrices;

   /* create symmetric: all couplings default to non-symmetric */
   symmetric = hypre_TAlloc(HYPRE_Int *, nvars, HYPRE_MEMORY_HOST);
   for (vi = 0; vi < nvars; vi++)
   {
      symmetric[vi] = hypre_TAlloc(HYPRE_Int, nvars, HYPRE_MEMORY_HOST);
      for (vj = 0; vj < nvars; vj++)
      {
         symmetric[vi][vj] = 0;
      }
   }
   hypre_SStructPMatrixSymmetric(pmatrix) = symmetric;

   /* scratch array reused by SetValues/SetBoxValues for entry translation */
   hypre_SStructPMatrixSEntriesSize(pmatrix) = size;
   hypre_SStructPMatrixSEntries(pmatrix) = hypre_TAlloc(HYPRE_Int, size, HYPRE_MEMORY_HOST);

   hypre_SStructPMatrixRefCount(pmatrix) = 1;

   *pmatrix_ptr = pmatrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixDestroy( hypre_SStructPMatrix *pmatrix )
{
   hypre_SStructStencil  **stencils;
   HYPRE_Int               nvars;
   HYPRE_Int             **smaps;
   hypre_StructStencil  ***sstencils;
   hypre_StructMatrix   ***smatrices;
   HYPRE_Int             **symmetric;
   HYPRE_Int               i, j;

   /* destroying NULL is a no-op */
   if (pmatrix == NULL)
   {
      return hypre_error_flag;
   }

   /* drop one reference; only the last owner releases the storage */
   hypre_SStructPMatrixRefCount(pmatrix) --;
   if (hypre_SStructPMatrixRefCount(pmatrix) != 0)
   {
      return hypre_error_flag;
   }

   stencils  = hypre_SStructPMatrixStencils(pmatrix);
   nvars     = hypre_SStructPMatrixNVars(pmatrix);
   smaps     = hypre_SStructPMatrixSMaps(pmatrix);
   sstencils = hypre_SStructPMatrixSStencils(pmatrix);
   smatrices = hypre_SStructPMatrixSMatrices(pmatrix);
   symmetric = hypre_SStructPMatrixSymmetric(pmatrix);

   for (i = 0; i < nvars; i++)
   {
      HYPRE_SStructStencilDestroy(stencils[i]);
      hypre_TFree(smaps[i], HYPRE_MEMORY_HOST);
      for (j = 0; j < nvars; j++)
      {
         /* entries for absent (i, j) couplings are NULL (see Create);
            the destroy routines are invoked unconditionally as before */
         hypre_StructStencilDestroy(sstencils[i][j]);
         hypre_StructMatrixDestroy(smatrices[i][j]);
      }
      hypre_TFree(sstencils[i], HYPRE_MEMORY_HOST);
      hypre_TFree(smatrices[i], HYPRE_MEMORY_HOST);
      hypre_TFree(symmetric[i], HYPRE_MEMORY_HOST);
   }

   hypre_TFree(stencils, HYPRE_MEMORY_HOST);
   hypre_TFree(smaps, HYPRE_MEMORY_HOST);
   hypre_TFree(sstencils, HYPRE_MEMORY_HOST);
   hypre_TFree(smatrices, HYPRE_MEMORY_HOST);
   hypre_TFree(symmetric, HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_SStructPMatrixSEntries(pmatrix), HYPRE_MEMORY_HOST);
   hypre_TFree(pmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixInitialize( hypre_SStructPMatrix *pmatrix )
{
   /* Initialize every underlying struct matrix so that coefficients can be
    * set.  Symmetry flags must be pushed down before initialization, since
    * they influence the struct matrix storage. */
   HYPRE_Int             nvars        = hypre_SStructPMatrixNVars(pmatrix);
   HYPRE_Int           **symmetric    = hypre_SStructPMatrixSymmetric(pmatrix);
   hypre_StructMatrix   *smatrix;
   HYPRE_Int             vi, vj;
   /* HYPRE_Int             num_ghost[2*HYPRE_MAXDIM]; */
   /* HYPRE_Int             vi, vj, d, ndim; */

#if 0
   ndim = hypre_SStructPMatrixNDim(pmatrix);
   /* RDF: Why are the ghosts being reset to one? Maybe it needs to be at least
    * one to set shared coefficients correctly, but not exactly one? */
   for (d = 0; d < ndim; d++)
   {
      num_ghost[2*d] = num_ghost[2*d+1] = 1;
   }
#endif
   for (vi = 0; vi < nvars; vi++)
   {
      for (vj = 0; vj < nvars; vj++)
      {
         /* NULL for (vi, vj) couplings that do not occur in the stencil */
         smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
         if (smatrix != NULL)
         {
            HYPRE_StructMatrixSetSymmetric(smatrix, symmetric[vi][vj]);
            /* hypre_StructMatrixSetNumGhost(smatrix, num_ghost); */
            hypre_StructMatrixInitialize(smatrix);
            /* needed to get AddTo accumulation correct between processors */
            hypre_StructMatrixClearGhostValues(smatrix);
         }
      }
   }

   /* no AddTo contributions are pending on a freshly initialized matrix */
   hypre_SStructPMatrixAccumulated(pmatrix) = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetValues( hypre_SStructPMatrix *pmatrix,
                               hypre_Index           index,
                               HYPRE_Int             var,
                               HYPRE_Int             nentries,
                               HYPRE_Int            *entries,
                               HYPRE_Complex        *values,
                               HYPRE_Int             action )
{
   /* Set (action = 0), add-to (action > 0) or get (action < 0) matrix
    * coefficients at a single grid index for variable `var`. */
   hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
   HYPRE_Int            *smap    = hypre_SStructPMatrixSMap(pmatrix, var);
   HYPRE_Int            *vars    = hypre_SStructStencilVars(stencil);
   hypre_StructMatrix   *smatrix;
   hypre_BoxArray       *grid_boxes;
   hypre_Box            *box, *grow_box;
   HYPRE_Int            *sentries;
   HYPRE_Int             i;

   /* the target struct matrix is chosen from the first entry only;
    * NOTE(review): all entries are assumed to couple to the same
    * to-variable -- confirm caller contract */
   smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);

   /* translate sstencil entry numbers into struct-stencil entry numbers */
   sentries = hypre_SStructPMatrixSEntries(pmatrix);
   for (i = 0; i < nentries; i++)
   {
      sentries[i] = smap[entries[i]];
   }

   /* set values inside the grid */
   hypre_StructMatrixSetValues(smatrix, index, nentries, sentries, values,
                               action, -1, 0);

   /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
   if (action != 0)
   {
      /* AddTo/Get */
      hypre_SStructPGrid  *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
      hypre_Index          varoffset;
      HYPRE_Int            done = 0;

      /* if the index lies inside some grid box, the call above handled it */
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
      hypre_ForBoxI(i, grid_boxes)
      {
         box = hypre_BoxArrayBox(grid_boxes, i);
         if (hypre_IndexInBox(index, box))
         {
            done = 1;
            break;
         }
      }

      /* otherwise locate the first box whose variable-offset-grown region
       * contains the index, and touch its ghost zone */
      if (!done)
      {
         grow_box = hypre_BoxCreate(hypre_BoxArrayNDim(grid_boxes));
         hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
                                        hypre_SStructPGridNDim(pgrid), varoffset);
         hypre_ForBoxI(i, grid_boxes)
         {
            box = hypre_BoxArrayBox(grid_boxes, i);
            hypre_CopyBox(box, grow_box);
            hypre_BoxGrowByIndex(grow_box, varoffset);
            if (hypre_IndexInBox(index, grow_box))
            {
               hypre_StructMatrixSetValues(smatrix, index, nentries, sentries,
                                           values, action, i, 1);
               break;
            }
         }
         hypre_BoxDestroy(grow_box);
      }
   }
   else
   {
      /* Set: clear any ghost copies of this index owned by other boxes */
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
      hypre_ForBoxI(i, grid_boxes)
      {
         box = hypre_BoxArrayBox(grid_boxes, i);
         if (!hypre_IndexInBox(index, box))
         {
            hypre_StructMatrixClearValues(smatrix, index, nentries, sentries, i, 1);
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetBoxValues( hypre_SStructPMatrix *pmatrix,
                                  hypre_Box            *set_box,
                                  HYPRE_Int             var,
                                  HYPRE_Int             nentries,
                                  HYPRE_Int            *entries,
                                  hypre_Box            *value_box,
                                  HYPRE_Complex        *values,
                                  HYPRE_Int             action )
{
   /* Box variant of SetValues: set (action = 0), add-to (action > 0) or get
    * (action < 0; -2 also zeroes out) coefficients over set_box, with
    * `values` laid out according to value_box. */
   HYPRE_Int             ndim    = hypre_SStructPMatrixNDim(pmatrix);
   hypre_SStructStencil *stencil = hypre_SStructPMatrixStencil(pmatrix, var);
   HYPRE_Int            *smap    = hypre_SStructPMatrixSMap(pmatrix, var);
   HYPRE_Int            *vars    = hypre_SStructStencilVars(stencil);
   hypre_StructMatrix   *smatrix;
   hypre_BoxArray       *grid_boxes;
   HYPRE_Int            *sentries;
   HYPRE_Int             i, j;

   /* target struct matrix chosen from the first entry; all entries are
    * assumed to couple to the same to-variable (as in SetValues) */
   smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entries[0]]);

   /* translate sstencil entry numbers into struct-stencil entry numbers */
   sentries = hypre_SStructPMatrixSEntries(pmatrix);
   for (i = 0; i < nentries; i++)
   {
      sentries[i] = smap[entries[i]];
   }

   /* set values inside the grid */
   hypre_StructMatrixSetBoxValues(smatrix, set_box, value_box, nentries, sentries,
                                  values, action, -1, 0);
   /* TODO: Why need DeviceSync? */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
   hypre_SyncCudaDevice(hypre_handle());
#endif

   /* set (AddTo/Get) or clear (Set) values outside the grid in ghost zones */
   if (action != 0)
   {
      /* AddTo/Get: walk the portion of set_box outside the grid and assign
       * each piece of it to exactly one grid box's ghost zone */
      hypre_SStructPGrid  *pgrid = hypre_SStructPMatrixPGrid(pmatrix);
      hypre_Index          varoffset;
      hypre_BoxArray      *left_boxes, *done_boxes, *temp_boxes;
      hypre_Box           *left_box, *done_box, *int_box;

      hypre_SStructVariableGetOffset(hypre_SStructPGridVarType(pgrid, var),
                                     hypre_SStructPGridNDim(pgrid), varoffset);
      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));

      left_boxes = hypre_BoxArrayCreate(1, ndim);
      done_boxes = hypre_BoxArrayCreate(2, ndim);
      temp_boxes = hypre_BoxArrayCreate(0, ndim);

      /* done_box always points to the first box in done_boxes */
      done_box = hypre_BoxArrayBox(done_boxes, 0);
      /* int_box always points to the second box in done_boxes */
      /* NOTE(review): done_boxes is later resized to 1, so int_box aliases
       * storage past its logical size; relies on the array keeping its
       * allocation -- confirm hypre_BoxArraySetSize never shrinks capacity */
      int_box = hypre_BoxArrayBox(done_boxes, 1);

      /* left_boxes := set_box minus the grid (the ghost-only region) */
      hypre_CopyBox(set_box, hypre_BoxArrayBox(left_boxes, 0));
      hypre_BoxArraySetSize(left_boxes, 1);
      hypre_SubtractBoxArrays(left_boxes, grid_boxes, temp_boxes);

      hypre_BoxArraySetSize(done_boxes, 0);
      hypre_ForBoxI(i, grid_boxes)
      {
         /* remove the region already covered by the previous grid box */
         hypre_SubtractBoxArrays(left_boxes, done_boxes, temp_boxes);
         hypre_BoxArraySetSize(done_boxes, 1);
         /* done_box := grid box i grown by the variable offset */
         hypre_CopyBox(hypre_BoxArrayBox(grid_boxes, i), done_box);
         hypre_BoxGrowByIndex(done_box, varoffset);
         hypre_ForBoxI(j, left_boxes)
         {
            left_box = hypre_BoxArrayBox(left_boxes, j);
            hypre_IntersectBoxes(left_box, done_box, int_box);
            hypre_StructMatrixSetBoxValues(smatrix, int_box, value_box,
                                           nentries, sentries,
                                           values, action, i, 1);
         }
      }

      hypre_BoxArrayDestroy(left_boxes);
      hypre_BoxArrayDestroy(done_boxes);
      hypre_BoxArrayDestroy(temp_boxes);
   }
   else
   {
      /* Set: clear ghost copies of set_box regions lying outside each box */
      hypre_BoxArray  *diff_boxes;
      hypre_Box       *grid_box, *diff_box;

      grid_boxes = hypre_StructGridBoxes(hypre_StructMatrixGrid(smatrix));
      diff_boxes = hypre_BoxArrayCreate(0, ndim);

      hypre_ForBoxI(i, grid_boxes)
      {
         grid_box = hypre_BoxArrayBox(grid_boxes, i);
         hypre_BoxArraySetSize(diff_boxes, 0);
         hypre_SubtractBoxes(set_box, grid_box, diff_boxes);
         hypre_ForBoxI(j, diff_boxes)
         {
            diff_box = hypre_BoxArrayBox(diff_boxes, j);
            hypre_StructMatrixClearBoxValues(smatrix, diff_box, nentries, sentries,
                                             i, 1);
         }
      }
      hypre_BoxArrayDestroy(diff_boxes);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAccumulate( hypre_SStructPMatrix *pmatrix )
{
   /* Fold AddTo contributions that landed in ghost zones back into the
    * owning processors via struct communication.  Idempotent: guarded by
    * the Accumulated flag, which Initialize resets. */
   hypre_SStructPGrid    *pgrid    = hypre_SStructPMatrixPGrid(pmatrix);
   HYPRE_Int              nvars    = hypre_SStructPMatrixNVars(pmatrix);
   HYPRE_Int              ndim     = hypre_SStructPGridNDim(pgrid);
   HYPRE_SStructVariable *vartypes = hypre_SStructPGridVarTypes(pgrid);

   hypre_StructMatrix    *smatrix;
   hypre_Index            varoffset;
   HYPRE_Int              num_ghost[2*HYPRE_MAXDIM];
   hypre_StructGrid      *sgrid;
   HYPRE_Int              vi, vj, d;

   hypre_CommInfo        *comm_info;
   hypre_CommPkg         *comm_pkg;
   hypre_CommHandle      *comm_handle;

   /* if values already accumulated, just return */
   if (hypre_SStructPMatrixAccumulated(pmatrix))
   {
      return hypre_error_flag;
   }

   for (vi = 0; vi < nvars; vi++)
   {
      for (vj = 0; vj < nvars; vj++)
      {
         smatrix = hypre_SStructPMatrixSMatrix(pmatrix, vi, vj);
         if (smatrix != NULL)
         {
            sgrid = hypre_StructMatrixGrid(smatrix);
            /* assumes vi and vj vartypes are the same */
            hypre_SStructVariableGetOffset(vartypes[vi], ndim, varoffset);
            /* ghost depth per dimension equals the variable offset */
            for (d = 0; d < ndim; d++)
            {
               num_ghost[2*d] = num_ghost[2*d+1] = hypre_IndexD(varoffset, d);
            }

            /* accumulate values from AddTo */
            hypre_CreateCommInfoFromNumGhost(sgrid, num_ghost, &comm_info);
            hypre_CommPkgCreate(comm_info,
                                hypre_StructMatrixDataSpace(smatrix),
                                hypre_StructMatrixDataSpace(smatrix),
                                hypre_StructMatrixNumValues(smatrix), NULL, 1,
                                hypre_StructMatrixComm(smatrix),
                                &comm_pkg);
            /* last argument 0 selects the in-place accumulate action;
             * communication is finalized before the pkg is destroyed */
            hypre_InitializeCommunication(comm_pkg,
                                          hypre_StructMatrixData(smatrix),
                                          hypre_StructMatrixData(smatrix),
                                          1, 0, &comm_handle);
            hypre_FinalizeCommunication(comm_handle);

            hypre_CommInfoDestroy(comm_info);
            hypre_CommPkgDestroy(comm_pkg);
         }
      }
   }

   hypre_SStructPMatrixAccumulated(pmatrix) = 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixAssemble( hypre_SStructPMatrix *pmatrix )
{
   HYPRE_Int           nvars = hypre_SStructPMatrixNVars(pmatrix);
   hypre_StructMatrix *smatrix;
   HYPRE_Int           i, j;

   /* fold any pending AddTo ghost contributions into their owners first */
   hypre_SStructPMatrixAccumulate(pmatrix);

   /* then clear ghost values and assemble each underlying struct matrix */
   for (i = 0; i < nvars; i++)
   {
      for (j = 0; j < nvars; j++)
      {
         smatrix = hypre_SStructPMatrixSMatrix(pmatrix, i, j);
         if (smatrix == NULL)
         {
            continue;
         }
         hypre_StructMatrixClearGhostValues(smatrix);
         hypre_StructMatrixAssemble(smatrix);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixSetSymmetric( hypre_SStructPMatrix *pmatrix,
                                  HYPRE_Int             var,
                                  HYPRE_Int             to_var,
                                  HYPRE_Int             symmetric )
{
   /* Mark the (var, to_var) coupling as symmetric (or not).  Passing -1 for
    * var and/or to_var applies the flag to all variables on that axis. */
   HYPRE_Int **pmsymmetric = hypre_SStructPMatrixSymmetric(pmatrix);

   HYPRE_Int vstart = var;
   HYPRE_Int vsize  = 1;
   HYPRE_Int tstart = to_var;
   HYPRE_Int tsize  = 1;
   HYPRE_Int v, t;

   if (var == -1)
   {
      vstart = 0;
      vsize  = hypre_SStructPMatrixNVars(pmatrix);
   }
   if (to_var == -1)
   {
      tstart = 0;
      tsize  = hypre_SStructPMatrixNVars(pmatrix);
   }

   /* BUG FIX: the loops previously ran while (v < vsize) / (t < tsize), so
    * a specific var or to_var >= 1 (start >= 1, size == 1) was silently
    * skipped.  Iterate over the intended window [start, start + size). */
   for (v = vstart; v < vstart + vsize; v++)
   {
      for (t = tstart; t < tstart + tsize; t++)
      {
         pmsymmetric[v][t] = symmetric;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructPMatrixPrint( const char           *filename,
                           hypre_SStructPMatrix *pmatrix,
                           HYPRE_Int             all )
{
   HYPRE_Int           nvars = hypre_SStructPMatrixNVars(pmatrix);
   hypre_StructMatrix *smatrix;
   HYPRE_Int           i, j;
   char                new_filename[255];

   /* write one file per existing (variable, variable) struct matrix,
    * suffixed "<filename>.<vi>.<vj>" */
   for (i = 0; i < nvars; i++)
   {
      for (j = 0; j < nvars; j++)
      {
         smatrix = hypre_SStructPMatrixSMatrix(pmatrix, i, j);
         if (smatrix == NULL)
         {
            continue;
         }
         hypre_sprintf(new_filename, "%s.%02d.%02d", filename, i, j);
         hypre_StructMatrixPrint(new_filename, smatrix, all);
      }
   }

   return hypre_error_flag;
}
/*==========================================================================
* SStructUMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SStructUMatrixInitialize( hypre_SStructMatrix *matrix )
{
   /* Initialize the unstructured (IJ/ParCSR) part of an sstruct matrix:
    * estimate per-row nonzero counts from the stencil splits and the
    * graph's UV entries, hand them to the IJ matrix, and allocate the
    * temporary row/column/coefficient scratch arrays. */
   HYPRE_Int               ndim        = hypre_SStructMatrixNDim(matrix);
   HYPRE_IJMatrix          ijmatrix    = hypre_SStructMatrixIJMatrix(matrix);
   HYPRE_Int               matrix_type = hypre_SStructMatrixObjectType(matrix);
   hypre_SStructGraph     *graph       = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid      *grid        = hypre_SStructGraphGrid(graph);
   HYPRE_Int               nparts      = hypre_SStructGraphNParts(graph);
   hypre_SStructPGrid    **pgrids      = hypre_SStructGraphPGrids(graph);
   hypre_SStructStencil ***stencils    = hypre_SStructGraphStencils(graph);
   HYPRE_Int               nUventries  = hypre_SStructGraphNUVEntries(graph);
   HYPRE_Int              *iUventries  = hypre_SStructGraphIUVEntries(graph);
   hypre_SStructUVEntry  **Uventries   = hypre_SStructGraphUVEntries(graph);
   HYPRE_Int             **nvneighbors = hypre_SStructGridNVNeighbors(grid);
   hypre_StructGrid       *sgrid;
   hypre_SStructStencil   *stencil;
   HYPRE_Int              *split;
   HYPRE_Int               nvars;
   HYPRE_Int               nrows, rowstart, nnzs ;
   HYPRE_Int               part, var, entry, b, m, mi;
   HYPRE_Int              *row_sizes;
   HYPRE_Int               max_row_size;

   hypre_BoxArray         *boxes;
   hypre_Box              *box;
   hypre_Box              *ghost_box;
   hypre_IndexRef          start;
   hypre_Index             loop_size, stride;

   HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);

#ifdef HYPRE_USING_OPENMP
   HYPRE_IJMatrixSetOMPFlag(ijmatrix, 1); /* Use OpenMP */
#endif

   /* SSTRUCT/STRUCT objects use the ghost-inclusive row numbering;
    * PARCSR uses the real (interior) local row range */
   if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
   {
      rowstart = hypre_SStructGridGhstartRank(grid);
      nrows = hypre_SStructGridGhlocalSize(grid) ;
   }
   else /* matrix_type == HYPRE_PARCSR */
   {
      rowstart = hypre_SStructGridStartRank(grid);
      nrows = hypre_SStructGridLocalSize(grid);
   }

   /* set row sizes */
   m = 0;
   max_row_size = 0;
   ghost_box = hypre_BoxCreate(ndim);
   row_sizes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
   hypre_SetIndex(stride, 1);
   for (part = 0; part < nparts; part++)
   {
      nvars = hypre_SStructPGridNVars(pgrids[part]);
      for (var = 0; var < nvars; var++)
      {
         sgrid = hypre_SStructPGridSGrid(pgrids[part], var);

         stencil = stencils[part][var];
         split = hypre_SStructMatrixSplit(matrix, part, var);
         /* count the stencil entries routed to the U matrix (split == -1) */
         nnzs = 0;
         for (entry = 0; entry < hypre_SStructStencilSize(stencil); entry++)
         {
            if (split[entry] == -1)
            {
               nnzs++;
            }
         }
#if 0
         /* TODO: For now, assume stencil is full/complete */
         if (hypre_SStructMatrixSymmetric(matrix))
         {
            nnzs = 2*nnzs - 1;
         }
#endif
         boxes = hypre_StructGridBoxes(sgrid);
         hypre_ForBoxI(b, boxes)
         {
            box = hypre_BoxArrayBox(boxes, b);
            /* rows are numbered over the ghost-grown box for non-PARCSR
             * types; the loop below only writes interior entries, and m
             * advances by the ghost box volume to stay aligned */
            hypre_CopyBox(box, ghost_box);
            if (matrix_type == HYPRE_SSTRUCT || matrix_type == HYPRE_STRUCT)
            {
               hypre_BoxGrowByArray(ghost_box, hypre_StructGridNumGhost(sgrid));
            }
            start = hypre_BoxIMin(box);
            hypre_BoxGetSize(box, loop_size);
            zypre_BoxLoop1Begin(hypre_SStructMatrixNDim(matrix), loop_size,
                                ghost_box, start, stride, mi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,mi) HYPRE_SMP_SCHEDULE
#endif
            zypre_BoxLoop1For(mi)
            {
               row_sizes[m+mi] = nnzs;
            }
            zypre_BoxLoop1End(mi);

            m += hypre_BoxVolume(ghost_box);
         }

         max_row_size = hypre_max(max_row_size, nnzs);
         /* rows coupled to neighbor parts may need the full stencil width */
         if (nvneighbors[part][var])
         {
            max_row_size =
               hypre_max(max_row_size, hypre_SStructStencilSize(stencil));
         }
      }
   }
   hypre_BoxDestroy(ghost_box);

   /* GEC0902 essentially for each UVentry we figure out how many extra columns
    * we need to add to the rowsizes */

   /* RDF: THREAD? */
   for (entry = 0; entry < nUventries; entry++)
   {
      mi = iUventries[entry];
      m = hypre_SStructUVEntryRank(Uventries[mi]) - rowstart;
      /* skip UV entries whose row is not owned locally */
      if ((m > -1) && (m < nrows))
      {
         row_sizes[m] += hypre_SStructUVEntryNUEntries(Uventries[mi]);
         max_row_size = hypre_max(max_row_size, row_sizes[m]);
      }
   }

   /* ZTODO: Update row_sizes based on neighbor off-part couplings */
   HYPRE_IJMatrixSetRowSizes (ijmatrix, (const HYPRE_Int *) row_sizes);

   hypre_TFree(row_sizes, HYPRE_MEMORY_HOST);

   /* scratch arrays sized for the widest possible row, used by the
    * set/get value routines (device-resident for GPU builds) */
   hypre_SStructMatrixTmpRowCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_DEVICE);
   hypre_SStructMatrixTmpColCoords(matrix) = hypre_CTAlloc(HYPRE_BigInt, max_row_size, HYPRE_MEMORY_DEVICE);
   hypre_SStructMatrixTmpCoeffs(matrix) = hypre_CTAlloc(HYPRE_Complex, max_row_size, HYPRE_MEMORY_DEVICE);

   HYPRE_IJMatrixInitialize(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager - here we need to check the
* neighbor box manager also
*--------------------------------------------------------------------------*/
/*
 * Set (action = 0), add to (action > 0), or get (action < 0) the
 * coefficients of one row of the unstructured (IJ) part of the matrix.
 *
 * 'index' is the grid index of the row on 'part' for variable 'var'.
 * Each value in 'entries' selects either a stencil entry
 * (entry < stencil size) or a non-stencil U-entry (entry >= stencil size).
 * Column ranks and coefficients are staged in the matrix's preallocated
 * temporary arrays and then handed to a single IJ matrix call.
 *
 * Sets error args 1-3 and returns if 'index' is found in neither the
 * local nor the neighbor box manager.
 */
HYPRE_Int
hypre_SStructUMatrixSetValues( hypre_SStructMatrix *matrix,
                               HYPRE_Int            part,
                               hypre_Index          index,
                               HYPRE_Int            var,
                               HYPRE_Int            nentries,
                               HYPRE_Int           *entries,
                               HYPRE_Complex       *values,
                               HYPRE_Int            action )
{
   HYPRE_Int                ndim     = hypre_SStructMatrixNDim(matrix);
   HYPRE_IJMatrix           ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
   hypre_SStructGraph      *graph    = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid       *grid     = hypre_SStructGraphGrid(graph);
   hypre_SStructGrid       *dom_grid = hypre_SStructGraphDomainGrid(graph);
   hypre_SStructStencil    *stencil  = hypre_SStructGraphStencil(graph, part, var);
   HYPRE_Int               *vars     = hypre_SStructStencilVars(stencil);
   hypre_Index             *shape    = hypre_SStructStencilShape(stencil);
   HYPRE_Int                size     = hypre_SStructStencilSize(stencil);
   hypre_IndexRef           offset;
   hypre_Index              to_index;
   hypre_SStructUVEntry    *Uventry;
   hypre_BoxManEntry       *boxman_entry;
   hypre_SStructBoxManInfo *entry_info;
   HYPRE_BigInt             row_coord;
   HYPRE_BigInt            *col_coords;
   HYPRE_Int                ncoeffs;
   HYPRE_Complex           *coeffs;
   HYPRE_Int                i, entry;
   HYPRE_BigInt             Uverank;
   HYPRE_Int                matrix_type = hypre_SStructMatrixObjectType(matrix);

   hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry);

   /* if not local, check neighbors */
   if (boxman_entry == NULL)
      hypre_SStructGridFindNborBoxManEntry(grid, part, index, var, &boxman_entry);

   if (boxman_entry == NULL)
   {
      /* index not covered anywhere: flag the (part, index, var) args */
      hypre_error_in_arg(1);
      hypre_error_in_arg(2);
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   else
   {
      /* NOTE(review): entry_info is fetched but never read below. */
      hypre_BoxManEntryGetInfo(boxman_entry, (void **) &entry_info);
   }

   /* Global row rank of 'index' (real or ghost rank per matrix_type). */
   hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index,
                                         &row_coord, matrix_type);

   /* Scratch arrays preallocated at matrix initialization. */
   col_coords = hypre_SStructMatrixTmpColCoords(matrix);
   coeffs = hypre_SStructMatrixTmpCoeffs(matrix);

   ncoeffs = 0;
   for (i = 0; i < nentries; i++)
   {
      entry = entries[i];

      if (entry < size)
      {
         /* stencil entries: column index = row index + stencil offset */
         offset = shape[entry];
         hypre_AddIndexes(index, offset, ndim, to_index);

         hypre_SStructGridFindBoxManEntry(dom_grid, part, to_index, vars[entry],
                                          &boxman_entry);

         /* if not local, check neighbors */
         if (boxman_entry == NULL)
            hypre_SStructGridFindNborBoxManEntry(dom_grid, part, to_index,
                                                 vars[entry], &boxman_entry);

         /* couplings to points outside both box managers are skipped */
         if (boxman_entry != NULL)
         {
            hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, to_index,
                                                  &col_coords[ncoeffs], matrix_type);
            coeffs[ncoeffs] = values[i];
            ncoeffs++;
         }
      }
      else
      {
         /* non-stencil entries: column rank comes from the UVEntry */
         entry -= size;
         hypre_SStructGraphGetUVEntryRank(graph, part, var, index, &Uverank);

         if (Uverank > -1)
         {
            Uventry = hypre_SStructGraphUVEntry(graph, Uverank);
            col_coords[ncoeffs] = hypre_SStructUVEntryToRank(Uventry, entry);
            coeffs[ncoeffs] = values[i];
            ncoeffs++;
         }
      }
   }

#if defined(HYPRE_USING_CUDA)
   HYPRE_BigInt *row_coords = hypre_SStructMatrixTmpRowCoords(matrix);

   if ( hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(ijmatrix)) == HYPRE_EXEC_DEVICE )
   {
      /* Device path: the set/add interface takes one row rank per
       * coefficient, so replicate row_coord ncoeffs times on the device. */
      hypreDevice_BigIntFilln(row_coords, ncoeffs, row_coord);

      if (action > 0)
      {
         HYPRE_IJMatrixAddToValues(ijmatrix, ncoeffs, NULL, row_coords,
                                   (const HYPRE_BigInt *) col_coords,
                                   (const HYPRE_Complex *) coeffs);
      }
      else if (action > -1)
      {
         HYPRE_IJMatrixSetValues(ijmatrix, ncoeffs, NULL, row_coords,
                                 (const HYPRE_BigInt *) col_coords,
                                 (const HYPRE_Complex *) coeffs);
      }
      else
      {
         // RL:TODO
         HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                 col_coords, values);
      }
   }
   else
#endif
   {
      /* Host path: a single row with ncoeffs columns. */
      if (action > 0)
      {
         HYPRE_IJMatrixAddToValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                   (const HYPRE_BigInt *) col_coords,
                                   (const HYPRE_Complex *) coeffs);
      }
      else if (action > -1)
      {
         HYPRE_IJMatrixSetValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                 (const HYPRE_BigInt *) col_coords,
                                 (const HYPRE_Complex *) coeffs);
      }
      else
      {
         /* get: results are written directly into 'values' */
         HYPRE_IJMatrixGetValues(ijmatrix, 1, &ncoeffs, &row_coord,
                                 col_coords, values);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Note: Entries must all be of type stencil or non-stencil, but not both.
*
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* 9/09 - AB: modified to use the box manager- here we need to check the
* neighbor box manager also
*
* To illustrate what is computed below before calling IJSetValues2(), consider
* the following example of a 5-pt stencil (c,w,e,s,n) on a 3x2 grid (the 'x' in
* arrays 'cols' and 'ijvalues' indicates "no data"):
*
* nrows = 6
* ncols = 3 4 3 3 4 3
* rows = 0 1 2 3 4 5
* row_indexes = 0 5 10 15 20 25
* cols = . . . x x . . . . x . . . x x . . . x x . . . . x . . . x x
* ijvalues = . . . x x . . . . x . . . x x . . . x x . . . . x . . . x x
* entry = c e n c w e n c w n c e s c w e s c w s
*--------------------------------------------------------------------------*/
/*
 * Set (action = 0), add to (action > 0), or get (action < 0) matrix
 * coefficients in the unstructured (IJ) part over the box 'set_box'.
 * 'values' is laid out over 'value_box' with nentries coefficients per
 * point; see the illustration comment above for the staged cols/ijvalues
 * layout handed to IJSetValues2()/IJAddToValues2().
 *
 * Fix vs. previous revision: removed the stray second semicolon in the
 * 'ncols, row_indexes' declaration (an empty declaration that pedantic
 * compilers reject).
 */
HYPRE_Int
hypre_SStructUMatrixSetBoxValues( hypre_SStructMatrix *matrix,
                                  HYPRE_Int            part,
                                  hypre_Box           *set_box,
                                  HYPRE_Int            var,
                                  HYPRE_Int            nentries,
                                  HYPRE_Int           *entries,
                                  hypre_Box           *value_box,
                                  HYPRE_Complex       *values,
                                  HYPRE_Int            action )
{
   HYPRE_Int             ndim     = hypre_SStructMatrixNDim(matrix);
   HYPRE_IJMatrix        ijmatrix = hypre_SStructMatrixIJMatrix(matrix);
   hypre_SStructGraph   *graph    = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid    *grid     = hypre_SStructGraphGrid(graph);
   hypre_SStructGrid    *dom_grid = hypre_SStructGraphDomainGrid(graph);
   hypre_SStructStencil *stencil  = hypre_SStructGraphStencil(graph, part, var);
   HYPRE_Int            *vars     = hypre_SStructStencilVars(stencil);
   hypre_Index          *shape    = hypre_SStructStencilShape(stencil);
   HYPRE_Int             size     = hypre_SStructStencilSize(stencil);
   hypre_IndexRef        offset;
   hypre_BoxManEntry   **boxman_entries;
   HYPRE_Int             nboxman_entries;
   hypre_BoxManEntry   **boxman_to_entries;
   HYPRE_Int             nboxman_to_entries;
   HYPRE_Int             nrows;
   HYPRE_Int            *ncols, *row_indexes;
   HYPRE_BigInt         *rows, *cols;
   HYPRE_Complex        *ijvalues;
   hypre_Box            *box;
   hypre_Box            *to_box;
   hypre_Box            *map_box;
   hypre_Box            *int_box;
   hypre_Index           index, stride, loop_size;
   hypre_IndexRef        start;
   hypre_Index           rs, cs;
   HYPRE_BigInt          row_base, col_base;
   HYPRE_Int             ei, entry, ii, jj, i;
   HYPRE_Int             matrix_type = hypre_SStructMatrixObjectType(matrix);

   box = hypre_BoxCreate(ndim);

   /*------------------------------------------
    * all stencil entries
    *------------------------------------------*/

   if (entries[0] < size)
   {
      to_box  = hypre_BoxCreate(ndim);
      map_box = hypre_BoxCreate(ndim);
      int_box = hypre_BoxCreate(ndim);

      /* Staging arrays sized for the full set_box; per-intersection loops
       * below only use the leading hypre_BoxVolume(box) <= nrows rows. */
      nrows       = hypre_BoxVolume(set_box);
      ncols       = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE);
      rows        = hypre_CTAlloc(HYPRE_BigInt, nrows, HYPRE_MEMORY_DEVICE);
      row_indexes = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE);
      cols        = hypre_CTAlloc(HYPRE_BigInt, nrows*nentries, HYPRE_MEMORY_DEVICE);
      ijvalues    = hypre_CTAlloc(HYPRE_Complex, nrows*nentries, HYPRE_MEMORY_DEVICE);

      hypre_SetIndex(stride, 1);

      hypre_SStructGridIntersect(grid, part, var, set_box, -1,
                                 &boxman_entries, &nboxman_entries);

      for (ii = 0; ii < nboxman_entries; ii++)
      {
         /* rs = strides for computing global row ranks on this box */
         hypre_SStructBoxManEntryGetStrides(boxman_entries[ii], rs, matrix_type);

         /* restrict set_box to the part owned by this boxman entry */
         hypre_CopyBox(set_box, box);
         hypre_BoxManEntryGetExtents(boxman_entries[ii],
                                     hypre_BoxIMin(map_box), hypre_BoxIMax(map_box));
         hypre_IntersectBoxes(box, map_box, int_box);
         hypre_CopyBox(int_box, box);

         /* For each index in 'box', compute a row of length <= nentries and
          * insert it into an nentries-length segment of 'cols' and 'ijvalues'.
          * This may result in gaps, but IJSetValues2() is designed for that. */

         nrows = hypre_BoxVolume(box);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < nrows; i++)
         {
            ncols[i] = 0;
            row_indexes[i] = i*nentries;
         }

         for (ei = 0; ei < nentries; ei++)
         {
            entry = entries[ei];

            /* column box = row box shifted by the stencil offset */
            hypre_CopyBox(box, to_box);

            offset = shape[entry];
            hypre_BoxShiftPos(to_box, offset);

            hypre_SStructGridIntersect(dom_grid, part, vars[entry], to_box, -1,
                                       &boxman_to_entries, &nboxman_to_entries);

            for (jj = 0; jj < nboxman_to_entries; jj++)
            {
               /* cs = strides for computing global column ranks */
               hypre_SStructBoxManEntryGetStrides(boxman_to_entries[jj], cs, matrix_type);

               hypre_BoxManEntryGetExtents(boxman_to_entries[jj],
                                           hypre_BoxIMin(map_box), hypre_BoxIMax(map_box));
               hypre_IntersectBoxes(to_box, map_box, int_box);

               /* col_base: rank of the first column index in the intersection */
               hypre_CopyIndex(hypre_BoxIMin(int_box), index);
               hypre_SStructBoxManEntryGetGlobalRank(boxman_to_entries[jj],
                                                     index, &col_base, matrix_type);

               /* shift back; row_base: rank of the matching row index */
               hypre_BoxShiftNeg(int_box, offset);
               hypre_CopyIndex(hypre_BoxIMin(int_box), index);
               hypre_SStructBoxManEntryGetGlobalRank(boxman_entries[ii],
                                                     index, &row_base, matrix_type);

               start = hypre_BoxIMin(int_box);
               hypre_BoxGetSize(int_box, loop_size);

               /*FIXME: Currently works only for the default boxloop (see GetIndex below) */
               zypre_BoxLoop2Begin(hypre_SStructMatrixNDim(matrix), loop_size,
                                   box, start, stride, mi,
                                   value_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
               zypre_BoxLoop2For(mi, vi)
               {
                  hypre_Index index;   /* loop-local; shadows the outer 'index' */
                  HYPRE_Int   d, ci;

                  hypre_BoxLoopGetIndex(index); /* FIXME (see comment above) */

                  /* ci: next free slot in this row's nentries-wide segment */
                  ci = mi*nentries + ncols[mi];
                  rows[mi] = row_base;
                  cols[ci] = col_base;
                  for (d = 0; d < ndim; d++)
                  {
                     rows[mi] += index[d]*rs[d];
                     cols[ci] += index[d]*cs[d];
                  }
                  ijvalues[ci] = values[ei + vi*nentries];
                  ncols[mi]++;
               }
               zypre_BoxLoop2End(mi, vi);

            } /* end loop through boxman to entries */

            hypre_TFree(boxman_to_entries, HYPRE_MEMORY_HOST);

         } /* end of ei nentries loop */

         if (action > 0)
         {
            HYPRE_IJMatrixAddToValues2(ijmatrix, nrows, ncols,
                                       (const HYPRE_BigInt *) rows,
                                       (const HYPRE_Int *) row_indexes,
                                       (const HYPRE_BigInt *) cols,
                                       (const HYPRE_Complex *) ijvalues);
         }
         else if (action > -1)
         {
            HYPRE_IJMatrixSetValues2(ijmatrix, nrows, ncols,
                                     (const HYPRE_BigInt *) rows,
                                     (const HYPRE_Int *) row_indexes,
                                     (const HYPRE_BigInt *) cols,
                                     (const HYPRE_Complex *) ijvalues);
         }
         else
         {
            HYPRE_IJMatrixGetValues(ijmatrix, nrows, ncols, rows, cols, values);
         }

      } /* end loop through boxman entries */

      hypre_TFree(boxman_entries, HYPRE_MEMORY_HOST);

      hypre_TFree(ncols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(rows, HYPRE_MEMORY_DEVICE);
      hypre_TFree(row_indexes, HYPRE_MEMORY_DEVICE);
      hypre_TFree(cols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijvalues, HYPRE_MEMORY_DEVICE);

      hypre_BoxDestroy(to_box);
      hypre_BoxDestroy(map_box);
      hypre_BoxDestroy(int_box);
   }

   /*------------------------------------------
    * non-stencil entries
    *------------------------------------------*/

   else
   {
      /* RDF: THREAD (Check safety on UMatrixSetValues call) */
      hypre_BoxGetSize(set_box, loop_size);

      /* Fall back to a point-by-point serial loop over set_box. */
      hypre_SerialBoxLoop0Begin(ndim, loop_size);
      {
         hypre_BoxLoopGetIndex(index);
         hypre_AddIndexes(index, hypre_BoxIMin(set_box), ndim, index);
         hypre_SStructUMatrixSetValues(matrix, part, index, var,
                                       nentries, entries, values, action);
         values += nentries;
      }
      hypre_SerialBoxLoop0End();
   }

   hypre_BoxDestroy(box);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
 * Finalize the unstructured (IJ) part of the matrix and cache the
 * assembled ParCSR object inside the SStruct matrix.
 */
HYPRE_Int
hypre_SStructUMatrixAssemble( hypre_SStructMatrix *matrix )
{
   HYPRE_IJMatrixAssemble(hypre_SStructMatrixIJMatrix(matrix));
   HYPRE_IJMatrixGetObject(hypre_SStructMatrixIJMatrix(matrix),
                           (void **) &hypre_SStructMatrixParCSRMatrix(matrix));

   return hypre_error_flag;
}
/*==========================================================================
* SStructMatrix routines
*==========================================================================*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
 * Create a new reference to 'matrix': bump its reference count and
 * return an aliasing pointer through 'matrix_ref'.
 */
HYPRE_Int
hypre_SStructMatrixRef( hypre_SStructMatrix  *matrix,
                        hypre_SStructMatrix **matrix_ref )
{
   hypre_SStructMatrixRefCount(matrix) += 1;
   *matrix_ref = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
 * Partition 'entries' into the stencil entries handled by the structured
 * part (S) and the entries that must go to the unstructured part (U).
 *
 * A stencil entry (entry < stencil size) goes to S only when its slot in
 * the split[] table is set (> -1), in which case the *split* value is
 * recorded; everything else -- unsplit stencil entries and all
 * non-stencil entries -- is recorded verbatim in U.  The returned arrays
 * are the matrix's own SEntries/UEntries buffers.
 */
HYPRE_Int
hypre_SStructMatrixSplitEntries( hypre_SStructMatrix *matrix,
                                 HYPRE_Int            part,
                                 HYPRE_Int            var,
                                 HYPRE_Int            nentries,
                                 HYPRE_Int           *entries,
                                 HYPRE_Int           *nSentries_ptr,
                                 HYPRE_Int          **Sentries_ptr,
                                 HYPRE_Int           *nUentries_ptr,
                                 HYPRE_Int          **Uentries_ptr )
{
   hypre_SStructGraph   *graph   = hypre_SStructMatrixGraph(matrix);
   HYPRE_Int            *split   = hypre_SStructMatrixSplit(matrix, part, var);
   hypre_SStructStencil *stencil = hypre_SStructGraphStencil(graph, part, var);
   HYPRE_Int             stencil_size = hypre_SStructStencilSize(stencil);
   HYPRE_Int            *Sentries = hypre_SStructMatrixSEntries(matrix);
   HYPRE_Int            *Uentries = hypre_SStructMatrixUEntries(matrix);
   HYPRE_Int             nSentries = 0;
   HYPRE_Int             nUentries = 0;
   HYPRE_Int             i, entry;

   for (i = 0; i < nentries; i++)
   {
      entry = entries[i];
      if ((entry < stencil_size) && (split[entry] > -1))
      {
         /* stencil entry owned by the structured part */
         Sentries[nSentries++] = split[entry];
      }
      else
      {
         /* unsplit stencil entry, or non-stencil entry */
         Uentries[nUentries++] = entry;
      }
   }

   *nSentries_ptr = nSentries;
   *Sentries_ptr  = Sentries;
   *nUentries_ptr = nUentries;
   *Uentries_ptr  = Uentries;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
/*
 * Set/add/get matrix coefficients at a single grid index, dispatching
 * each requested entry to the structured (P) or unstructured (U) part:
 *
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 * (action =-2): get values and zero out
 */
HYPRE_Int
hypre_SStructMatrixSetValues( HYPRE_SStructMatrix  matrix,
                              HYPRE_Int            part,
                              HYPRE_Int           *index,
                              HYPRE_Int            var,
                              HYPRE_Int            nentries,
                              HYPRE_Int           *entries,
                              HYPRE_Complex       *values,
                              HYPRE_Int            action )
{
   HYPRE_Int             ndim  = hypre_SStructMatrixNDim(matrix);
   hypre_SStructGraph   *graph = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid    *grid  = hypre_SStructGraphGrid(graph);
   HYPRE_Int           **nvneighbors = hypre_SStructGridNVNeighbors(grid);
   hypre_SStructPMatrix *pmat;
   hypre_Index           clean_index;
   HYPRE_Int            *s_entries, *u_entries;
   HYPRE_Int             num_s, num_u;

   /* Partition 'entries' into structured (S) and unstructured (U) sets. */
   hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
                                   &num_s, &s_entries, &num_u, &u_entries);

   hypre_CopyToCleanIndex(index, ndim, clean_index);

   /* S-matrix contribution */
   if (num_s > 0)
   {
      pmat = hypre_SStructMatrixPMatrix(matrix, part);
      hypre_SStructPMatrixSetValues(pmat, clean_index, var,
                                    num_s, s_entries, values, action);

      /* put inter-part couplings in UMatrix and zero them out in PMatrix
       * (possibly in ghost zones) */
      if (nvneighbors[part][var] > 0)
      {
         hypre_Box *cell_box;
         HYPRE_Int  d;

         /* degenerate box covering exactly the one index being set */
         cell_box = hypre_BoxCreate(ndim);
         for (d = 0; d < ndim; d++)
         {
            hypre_BoxIMinD(cell_box, d) = clean_index[d];
            hypre_BoxIMaxD(cell_box, d) = clean_index[d];
         }
         hypre_SStructMatrixSetInterPartValues(matrix, part, cell_box, var,
                                               num_s, entries,
                                               cell_box, values, action);
         hypre_BoxDestroy(cell_box);
      }
   }

   /* U-matrix contribution */
   if (num_u > 0)
   {
      hypre_SStructUMatrixSetValues(matrix, part, clean_index, var,
                                    num_u, u_entries, values, action);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
* (action =-2): get values and zero out
*--------------------------------------------------------------------------*/
/*
 * Set/add/get matrix coefficients over the box 'set_box', dispatching
 * each requested entry to the structured (P) or unstructured (U) part:
 *
 * (action > 0): add-to values
 * (action = 0): set values
 * (action < 0): get values
 * (action =-2): get values and zero out
 */
HYPRE_Int
hypre_SStructMatrixSetBoxValues( HYPRE_SStructMatrix  matrix,
                                 HYPRE_Int            part,
                                 hypre_Box           *set_box,
                                 HYPRE_Int            var,
                                 HYPRE_Int            nentries,
                                 HYPRE_Int           *entries,
                                 hypre_Box           *value_box,
                                 HYPRE_Complex       *values,
                                 HYPRE_Int            action )
{
   hypre_SStructGraph   *graph = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid    *grid  = hypre_SStructGraphGrid(graph);
   HYPRE_Int           **nvneighbors = hypre_SStructGridNVNeighbors(grid);
   hypre_SStructPMatrix *pmat;
   HYPRE_Int            *s_entries, *u_entries;
   HYPRE_Int             num_s, num_u;

   /* Partition 'entries' into structured (S) and unstructured (U) sets. */
   hypre_SStructMatrixSplitEntries(matrix, part, var, nentries, entries,
                                   &num_s, &s_entries, &num_u, &u_entries);

   /* S-matrix contribution */
   if (num_s > 0)
   {
      pmat = hypre_SStructMatrixPMatrix(matrix, part);
      hypre_SStructPMatrixSetBoxValues(pmat, set_box, var, num_s, s_entries,
                                       value_box, values, action);

      /* put inter-part couplings in UMatrix and zero them out in PMatrix
       * (possibly in ghost zones) */
      if (nvneighbors[part][var] > 0)
      {
         hypre_SStructMatrixSetInterPartValues(matrix, part, set_box, var,
                                               num_s, entries,
                                               value_box, values, action);
      }
   }

   /* U-matrix contribution */
   if (num_u > 0)
   {
      hypre_SStructUMatrixSetBoxValues(matrix, part, set_box, var,
                                       num_u, u_entries,
                                       value_box, values, action);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Put inter-part couplings in UMatrix and zero them out in PMatrix (possibly in
* ghost zones). Assumes that all entries are stencil entries.
*--------------------------------------------------------------------------*/
/*
 * Move inter-part stencil couplings into the UMatrix and zero them out
 * in the PMatrix (action >= 0), or read them back from the UMatrix
 * (action < 0).  Assumes all 'entries' are stencil entries; see the
 * header comment above.
 */
HYPRE_Int
hypre_SStructMatrixSetInterPartValues( HYPRE_SStructMatrix  matrix,
                                       HYPRE_Int            part,
                                       hypre_Box           *set_box,
                                       HYPRE_Int            var,
                                       HYPRE_Int            nentries,
                                       HYPRE_Int           *entries,
                                       hypre_Box           *value_box,
                                       HYPRE_Complex       *values,
                                       HYPRE_Int            action )
{
   HYPRE_Int                ndim  = hypre_SStructMatrixNDim(matrix);
   hypre_SStructGraph      *graph = hypre_SStructMatrixGraph(matrix);
   hypre_SStructGrid       *grid  = hypre_SStructGraphGrid(graph);
   hypre_SStructPMatrix    *pmatrix;
   hypre_SStructPGrid      *pgrid;
   hypre_SStructStencil    *stencil;
   hypre_Index             *shape;
   HYPRE_Int               *smap;
   HYPRE_Int               *vars, frvartype, tovartype;
   hypre_StructMatrix      *smatrix;
   hypre_Box               *box, *ibox0, *ibox1, *tobox, *frbox;
   hypre_Index              stride, loop_size;
   hypre_IndexRef           offset, start;
   hypre_BoxManEntry      **frentries, **toentries;
   hypre_SStructBoxManInfo *frinfo, *toinfo;
   HYPRE_Complex           *tvalues = NULL;
   HYPRE_Int                nfrentries, ntoentries, frpart, topart;
   HYPRE_Int                entry, sentry, ei, fri, toi;

   pmatrix = hypre_SStructMatrixPMatrix(matrix, part);

   pgrid = hypre_SStructPMatrixPGrid(pmatrix);
   frvartype = hypre_SStructPGridVarType(pgrid, var);

   box   = hypre_BoxCreate(ndim);
   ibox0 = hypre_BoxCreate(ndim);
   ibox1 = hypre_BoxCreate(ndim);
   tobox = hypre_BoxCreate(ndim);
   frbox = hypre_BoxCreate(ndim);

   stencil = hypre_SStructPMatrixStencil(pmatrix, var);
   smap    = hypre_SStructPMatrixSMap(pmatrix, var);
   shape   = hypre_SStructStencilShape(stencil);
   vars    = hypre_SStructStencilVars(stencil);

   hypre_SetIndex(stride, 1);

   for (ei = 0; ei < nentries; ei++)
   {
      entry = entries[ei];
      /* smap translates the graph stencil entry into the corresponding
       * struct-matrix stencil entry (used for ClearBoxValues below) */
      sentry = smap[entry];
      offset = shape[entry];
      smatrix = hypre_SStructPMatrixSMatrix(pmatrix, var, vars[entry]);
      tovartype = hypre_SStructPGridVarType(pgrid, vars[entry]);

      /* shift box in the stencil offset direction */
      hypre_CopyBox(set_box, box);

      hypre_AddIndexes(hypre_BoxIMin(box), offset, ndim, hypre_BoxIMin(box));
      hypre_AddIndexes(hypre_BoxIMax(box), offset, ndim, hypre_BoxIMax(box));

      /* get "to" entries */
      hypre_SStructGridIntersect(grid, part, vars[entry], box, -1,
                                 &toentries, &ntoentries);

      for (toi = 0; toi < ntoentries; toi++)
      {
         hypre_BoxManEntryGetExtents(
            toentries[toi], hypre_BoxIMin(tobox), hypre_BoxIMax(tobox));
         hypre_IntersectBoxes(box, tobox, ibox0);
         if (hypre_BoxVolume(ibox0))
         {
            hypre_SStructBoxManEntryGetPart(toentries[toi], part, &topart);

            /* shift ibox0 back */
            hypre_SubtractIndexes(hypre_BoxIMin(ibox0), offset, ndim,
                                  hypre_BoxIMin(ibox0));
            hypre_SubtractIndexes(hypre_BoxIMax(ibox0), offset, ndim,
                                  hypre_BoxIMax(ibox0));

            /* get "from" entries */
            hypre_SStructGridIntersect(grid, part, var, ibox0, -1,
                                       &frentries, &nfrentries);
            for (fri = 0; fri < nfrentries; fri++)
            {
               /* don't set couplings within the same part unless possibly for
                * cell data (to simplify periodic conditions for users) */
               hypre_SStructBoxManEntryGetPart(frentries[fri], part, &frpart);
               if (topart == frpart)
               {
                  if ( (frvartype != HYPRE_SSTRUCT_VARIABLE_CELL) ||
                       (tovartype != HYPRE_SSTRUCT_VARIABLE_CELL) )
                  {
                     continue;
                  }
                  hypre_BoxManEntryGetInfo(frentries[fri], (void **) &frinfo);
                  hypre_BoxManEntryGetInfo(toentries[toi], (void **) &toinfo);
                  if ( hypre_SStructBoxManInfoType(frinfo) ==
                       hypre_SStructBoxManInfoType(toinfo) )
                  {
                     continue;
                  }
               }

               hypre_BoxManEntryGetExtents(
                  frentries[fri], hypre_BoxIMin(frbox), hypre_BoxIMax(frbox));
               hypre_IntersectBoxes(ibox0, frbox, ibox1);
               if (hypre_BoxVolume(ibox1))
               {
                  /* grow the scratch buffer to the intersection volume */
                  tvalues = hypre_TReAlloc(tvalues, HYPRE_Complex, hypre_BoxVolume(ibox1), HYPRE_MEMORY_HOST);

                  if (action >= 0)
                  {
                     /* set or add */

                     /* copy values into tvalues */
                     start = hypre_BoxIMin(ibox1);
                     hypre_BoxGetSize(ibox1, loop_size);
                     zypre_BoxLoop2Begin(ndim, loop_size,
                                         ibox1, start, stride, mi,
                                         value_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
                     zypre_BoxLoop2For(mi, vi)
                     {
                        tvalues[mi] = values[ei + vi*nentries];
                     }
                     zypre_BoxLoop2End(mi, vi);

                     /* put values into UMatrix */
                     hypre_SStructUMatrixSetBoxValues(
                        matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action);
                     /* zero out values in PMatrix (possibly in ghost) */
                     hypre_StructMatrixClearBoxValues(
                        smatrix, ibox1, 1, &sentry, -1, 1);
                  }
                  else
                  {
                     /* get */

                     /* get values from UMatrix */
                     hypre_SStructUMatrixSetBoxValues(
                        matrix, part, ibox1, var, 1, &entry, ibox1, tvalues, action);

                     /* copy tvalues into values */
                     start = hypre_BoxIMin(ibox1);
                     hypre_BoxGetSize(ibox1, loop_size);
                     zypre_BoxLoop2Begin(ndim, loop_size,
                                         ibox1, start, stride, mi,
                                         value_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
                     zypre_BoxLoop2For(mi, vi)
                     {
                        values[ei + vi*nentries] = tvalues[mi];
                     }
                     zypre_BoxLoop2End(mi, vi);
                  } /* end if action */
               } /* end if nonzero ibox1 */
            } /* end of "from" boxman entries loop */
            hypre_TFree(frentries, HYPRE_MEMORY_HOST);
         } /* end if nonzero ibox0 */
      } /* end of "to" boxman entries loop */
      hypre_TFree(toentries, HYPRE_MEMORY_HOST);
   } /* end of entries loop */

   hypre_BoxDestroy(box);
   hypre_BoxDestroy(ibox0);
   hypre_BoxDestroy(ibox1);
   hypre_BoxDestroy(tobox);
   hypre_BoxDestroy(frbox);
   hypre_TFree(tvalues, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
|
tensor_utils.h | // BEGINLICENSE
//
// This file is part of helPME, which is distributed under the BSD 3-clause license,
// as described in the LICENSE file in the top level directory of this project.
//
// Author: Andrew C. Simmonett
//
// ENDLICENSE
#ifndef _HELPME_TENSOR_UTILS_H_
#define _HELPME_TENSOR_UTILS_H_
#ifdef _MSC_VER
#define __restrict__ __restrict
#endif
#if HAVE_BLAS == 1
extern "C" {
extern void dgemm_(char *, char *, int *, int *, int *, double *, double *, int *, double *, int *, double *, double *,
int *);
extern void sgemm_(char *, char *, int *, int *, int *, float *, float *, int *, float *, int *, float *, float *,
int *);
}
#endif
namespace helpme {
/*!
 * \brief Reorders a contiguous ABC-ordered 3D tensor into CBA order.
 * \param abcPtr the address of the incoming ABC ordered tensor.
 * \param aDimension the dimension of the A index.
 * \param bDimension the dimension of the B index.
 * \param cDimension the dimension of the C index.
 * \param cbaPtr the address of the outgoing CBA ordered tensor.
 * \param nThreads the number of parallel threads to use.
 */
template <typename Real>
void permuteABCtoCBA(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension, int const cDimension,
                     Real *__restrict__ cbaPtr, size_t nThreads = 1) {
    // Input element (A,B,C) lives at abcPtr[(A*bDimension + B)*cDimension + C];
    // output element (C,B,A) lives at cbaPtr[(C*bDimension + B)*aDimension + A].
#pragma omp parallel for num_threads(nThreads)
    for (int B = 0; B < bDimension; ++B) {
        for (int C = 0; C < cDimension; ++C) {
            Real const *src = abcPtr + cDimension * B + C;          // A advances by bDimension*cDimension
            Real *dst = cbaPtr + aDimension * (bDimension * C + B); // A advances by 1
            for (int A = 0; A < aDimension; ++A) dst[A] = src[A * bDimension * cDimension];
        }
    }
}
/*!
 * \brief Reorders a contiguous ABC-ordered 3D tensor into ACB order.
 * \param abcPtr the address of the incoming ABC ordered tensor.
 * \param aDimension the dimension of the A index.
 * \param bDimension the dimension of the B index.
 * \param cDimension the dimension of the C index.
 * \param acbPtr the address of the outgoing ACB ordered tensor.
 * \param nThreads the number of parallel threads to use.
 */
template <typename Real>
void permuteABCtoACB(Real const *__restrict__ abcPtr, int const aDimension, int const bDimension, int const cDimension,
                     Real *__restrict__ acbPtr, size_t nThreads = 1) {
#pragma omp parallel for num_threads(nThreads)
    for (int A = 0; A < aDimension; ++A) {
        // A is the slowest index in both layouts, so each A selects the same
        // bDimension*cDimension slab; transpose that BxC slab into CxB form.
        Real const *slabIn = abcPtr + bDimension * cDimension * A;
        Real *slabOut = acbPtr + bDimension * cDimension * A;
        for (int C = 0; C < cDimension; ++C)
            for (int B = 0; B < bDimension; ++B) slabOut[bDimension * C + B] = slabIn[cDimension * B + C];
    }
}
/*!
 * \brief Contracts an ABxC tensor with a DxC tensor, to produce an ABxD quantity:
 *        abd(AB,D) = sum_C abc(AB,C) * dc(D,C).
 * \param abcPtr the address of the incoming ABxC tensor.
 * \param dcPtr the address of the incoming DxC tensor.
 * \param abDimension the dimension of the AB index.
 * \param cDimension the dimension of the C index.
 * \param dDimension the dimension of the D index.
 * \param abdPtr the address of the outgoing ABD tensor.
 */
template <typename Real>
void contractABxCWithDxC(Real const *__restrict__ abcPtr, Real const *__restrict__ dcPtr, int const abDimension,
                         int const cDimension, int const dDimension, Real *__restrict__ abdPtr) {
    for (int AB = 0; AB < abDimension; ++AB) {
        Real const *abRow = abcPtr + cDimension * AB;
        for (int D = 0; D < dDimension; ++D) {
            Real const *dRow = dcPtr + cDimension * D;
            Real dot = 0;
            // Accumulate in ascending C order (same order as the reference
            // implementation, so floating point results match exactly).
            for (int C = 0; C < cDimension; ++C) dot += abRow[C] * dRow[C];
            abdPtr[dDimension * AB + D] = dot;
        }
    }
}
#if HAVE_BLAS == 1
/* BLAS specialization for float: abd(AB,D) = sum_C abc(AB,C) * dc(D,C),
 * i.e. the row-major product abd = abc . dc^T.  Expressed to Fortran
 * (column-major) SGEMM as C(d x ab) = A^T(d x c) . B(c x ab) with
 * A = dc (lda = c) and B = abc (ldb = c), whose column-major result is
 * exactly the row-major ab x d output (ldc = d).  The early return skips
 * the call entirely when any dimension is zero (nothing to compute). */
template <>
void contractABxCWithDxC<float>(float const *__restrict__ abcPtr, float const *__restrict__ dcPtr,
                                int const abDimension, int const cDimension, int const dDimension,
                                float *__restrict__ abdPtr) {
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char transB = 't';
    char transA = 'n';
    float alpha = 1;
    float beta = 0;
    // const_cast is required only because the F77 prototype takes no const.
    sgemm_(&transB, &transA, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &alpha, const_cast<float *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<float *>(abcPtr), const_cast<int *>(&cDimension), &beta, abdPtr, const_cast<int *>(&dDimension));
}
/* BLAS specialization for double: abd(AB,D) = sum_C abc(AB,C) * dc(D,C),
 * i.e. the row-major product abd = abc . dc^T.  Expressed to Fortran
 * (column-major) DGEMM as C(d x ab) = A^T(d x c) . B(c x ab) with
 * A = dc (lda = c) and B = abc (ldb = c), whose column-major result is
 * exactly the row-major ab x d output (ldc = d).  The early return skips
 * the call entirely when any dimension is zero (nothing to compute). */
template <>
void contractABxCWithDxC<double>(double const *__restrict__ abcPtr, double const *__restrict__ dcPtr,
                                 int const abDimension, int const cDimension, int const dDimension,
                                 double *__restrict__ abdPtr) {
    if (abDimension == 0 || cDimension == 0 || dDimension == 0) return;
    char transB = 't';
    char transA = 'n';
    double alpha = 1;
    double beta = 0;
    // const_cast is required only because the F77 prototype takes no const.
    dgemm_(&transB, &transA, const_cast<int *>(&dDimension), const_cast<int *>(&abDimension),
           const_cast<int *>(&cDimension), &alpha, const_cast<double *>(dcPtr), const_cast<int *>(&cDimension),
           const_cast<double *>(abcPtr), const_cast<int *>(&cDimension), &beta, abdPtr, const_cast<int *>(&dDimension));
}
#endif
} // Namespace helpme
#endif // Header guard
|
phase.c | /*
* Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <memory.h>
#include <hashTableC.h>
#include <unistd.h>
#include <time.h>
#include "marginVersion.h"
#include "margin.h"
#include "htsIntegration.h"
#include "helenFeatures.h"
/*
* Main functions
*/
/*
 * Print usage/help text for the 'margin phase' subcommand to stderr.
 * The --threads option line is only advertised when built with OpenMP.
 */
void phase_usage() {
    fprintf(stderr, "usage: margin phase <ALIGN_BAM> <REFERENCE_FASTA> <VARIANT_VCF> <PARAMS> [options]\n");
    fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H);
    fprintf(stderr, "Tags reads in ALIGN_BAM using variants in VARIANT_VCF.\n");

    fprintf(stderr, "\nRequired arguments:\n");
    fprintf(stderr, "    ALIGN_BAM is the alignment of reads to the reference.\n");
    fprintf(stderr, "    REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n");
    fprintf(stderr, "    VARIANT_VCF is the set of variants to use for phasing.\n");
    fprintf(stderr, "    PARAMS is the file with margin parameters.\n");

    fprintf(stderr, "\nDefault options:\n");
    fprintf(stderr, "    -h --help                : Print this help screen\n");
    fprintf(stderr, "    -a --logLevel            : Set the log level [default = info]\n");
# ifdef _OPENMP
    fprintf(stderr, "    -t --threads             : Set number of concurrent threads [default = 1]\n");
#endif
    fprintf(stderr, "    -o --outputBase          : Name to use for output files [default = 'output']\n");
    fprintf(stderr, "    -r --region              : If set, will only compute for given chromosomal region\n");
    fprintf(stderr, "                                 Format: chr:start_pos-end_pos (chr3:2000-3000)\n");
    fprintf(stderr, "    -p --depth               : Will override the downsampling depth set in PARAMS\n");
    fprintf(stderr, "    -k --tempFilesToDisk     : Write temporary files to disk (for --diploid or supplementary output)\n");

    fprintf(stderr, "\nOutput options:\n");
    fprintf(stderr, "    -M --skipHaplotypeBAM    : Do not write out phased BAM\n");
    fprintf(stderr, "    -V --skipPhasedVCF       : Do not write out phased VCF\n");
    fprintf(stderr, "\n");
}
int phase_main(int argc, char *argv[]) {
// Parameters / arguments
char *logLevelString = stString_copy("critical");
char *bamInFile = NULL;
char *paramsFile = NULL;
char *referenceFastaFile = NULL;
char *outputBase = stString_copy("output");
char *regionStr = NULL;
char *vcfFile = NULL;
int numThreads = 1;
int64_t maxDepth = -1;
bool inMemory = TRUE;
bool shouldOutputHaplotaggedBam = TRUE;
bool shouldOutputPhasedVcf = TRUE;
if (argc < 4) {
free(outputBase);
free(logLevelString);
phase_usage();
return 0;
}
bamInFile = stString_copy(argv[1]);
referenceFastaFile = stString_copy(argv[2]);
vcfFile = stString_copy(argv[3]);
paramsFile = stString_copy(argv[4]);
// Parse the options
while (1) {
static struct option long_options[] = {
{ "help", no_argument, 0, 'h' },
{ "logLevel", required_argument, 0, 'a' },
# ifdef _OPENMP
{ "threads", required_argument, 0, 't'},
#endif
{ "outputBase", required_argument, 0, 'o'},
{ "region", required_argument, 0, 'r'},
{ "depth", required_argument, 0, 'p'},
{ "tempFilesToDisk", no_argument, 0, 'k'},
{ "skipHaplotypeBAM", no_argument, 0, 'M'},
{ "skipPhasedVCF", no_argument, 0, 'V'},
{ 0, 0, 0, 0 } };
int option_index = 0;
int key = getopt_long(argc-2, &argv[2], "ha:o:p:t:r:kMV", long_options, &option_index);
if (key == -1) {
break;
}
switch (key) {
case 'a':
free(logLevelString);
logLevelString = stString_copy(optarg);
break;
case 'h':
phase_usage();
return 0;
case 'o':
free(outputBase);
outputBase = getFileBase(optarg, "output");
break;
case 'r':
regionStr = stString_copy(optarg);
break;
case 'p':
maxDepth = atoi(optarg);
if (maxDepth < 0) {
st_errAbort("Invalid maxDepth: %s", optarg);
}
break;
case 't':
numThreads = atoi(optarg);
if (numThreads <= 0) {
st_errAbort("Invalid thread count: %d", numThreads);
}
break;
case 'k':
inMemory = FALSE;
break;
case 'M':
shouldOutputHaplotaggedBam = FALSE;
break;
case 'V':
shouldOutputPhasedVcf = FALSE;
break;
default:
phase_usage();
free(outputBase);
free(logLevelString);
free(bamInFile);
free(referenceFastaFile);
free(paramsFile);
return 0;
}
}
// sanity check (conflicting params)
if (!shouldOutputHaplotaggedBam && !shouldOutputPhasedVcf) {
st_errAbort("With --skipHaplotypeBAM and --skipPhasedVCF there will be no output.\n");
}
// sanity check (verify files exist)
if (access(bamInFile, R_OK) != 0) {
st_errAbort("Could not read from input bam file: %s\n", bamInFile);
char *idx = stString_print("%s.bai", bamInFile);
if (access(idx, R_OK) != 0) {
st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile);
}
free(idx);
}
if (access(referenceFastaFile, R_OK) != 0) {
st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile);
}
if (access(vcfFile, R_OK) != 0) {
st_errAbort("Could not read from vcf file: %s\n", vcfFile);
}
if (access(paramsFile, R_OK) != 0) {
st_errAbort("Could not read from params file: %s\n", paramsFile);
}
// Initialization from arguments
time_t startTime = time(NULL);
st_setLogLevelFromString(logLevelString);
free(logLevelString);
if (st_getLogLevel() >= info) {
st_setCallocDebug(true);
}
# ifdef _OPENMP
if (numThreads <= 0) {
numThreads = 1;
}
omp_set_num_threads(numThreads);
st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads());
# endif
// Parse parameters
st_logCritical("> Parsing model parameters from file: %s\n", paramsFile);
Params *params = params_readParams(paramsFile);
// update depth (if set)
if (maxDepth >= 0) {
st_logCritical("> Changing maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth,
maxDepth);
params->polishParams->maxDepth = (uint64_t) maxDepth;
}
// Print a report of the parsed parameters
if (st_getLogLevel() == debug) {
params_printParameters(params, stderr);
}
// get vcf entries (if set)
stHash *vcfEntries = NULL;
if (vcfFile != NULL) {
vcfEntries = parseVcf2(vcfFile, regionStr, params);
}
// get valid contigs (to help bam chunker construction)
stList *vcfContigsTmp = stHash_getKeys(vcfEntries);
stSet *vcfContigs = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
for (int64_t i = 0; i < stList_length(vcfContigsTmp); i++) {
stSet_insert(vcfContigs, stList_get(vcfContigsTmp, i));
}
// get chunker for bam. if regionStr is NULL, it will be ignored
time_t chunkingStart = time(NULL);
BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, vcfContigs, params->polishParams, TRUE);
char *regionStrInformative = regionStr != NULL ? stString_copy(regionStr) : stString_join2(",", vcfContigsTmp);
st_logCritical(
"> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n",
time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary,
regionStrInformative, bamChunker->chunkCount);
if (bamChunker->chunkCount == 0) {
st_errAbort("> Found no valid reads!\n");
}
free(regionStrInformative);
stList_destruct(vcfContigsTmp);
stSet_destruct(vcfContigs);
// print chunk info
char *outputChunksFile = stString_print("%s.chunks.csv", outputBase);
FILE *chunksOut = safe_fopen(outputChunksFile, "w");
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
BamChunk *c = stList_get(bamChunker->chunks, i);
fprintf(chunksOut, "%s,%"PRId64",%"PRId64",%"PRId64",%"PRId64"\n", c->refSeqName, c->chunkOverlapStart,
c->chunkOverlapEnd, c->chunkStart, c->chunkEnd);
}
fclose(chunksOut);
free(outputChunksFile);
// output chunker tracks intermediate output files
OutputChunkers *outputChunkers = outputChunkers_construct(numThreads, params, NULL, NULL, NULL, NULL,
".hap1", ".hap2", inMemory);
// (may) need to shuffle chunks
stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct);
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
stList_append(chunkOrder, stIntTuple_construct1(i));
}
if (params->polishParams->shuffleChunks) {
switch (params->polishParams->shuffleChunksMethod) {
case SCM_SIZE_DESC:
st_logCritical("> Ordering chunks by estimated depth\n");
stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks);
stList_reverse(chunkOrder);
break;
case SCM_RANDOM:
st_logCritical("> Randomly shuffling chunks\n");
stList_shuffle(chunkOrder);
break;
}
}
// multiproccess the chunks, save to results
st_logCritical("> Setup complete, beginning run\n");
int64_t lastReportedPercentage = 0;
time_t polishStartTime = time(NULL);
# ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
# endif
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0);
// Time all chunks
time_t chunkStartTime = time(NULL);
// Get chunk
BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx);
// logging
char *logIdentifier;
bool logProgress = FALSE;
int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount);
# ifdef _OPENMP
int64_t threadIdx = omp_get_thread_num();
logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx);
if (threadIdx == 0) {
if (currentPercentage != lastReportedPercentage) {
logProgress = TRUE;
lastReportedPercentage = currentPercentage;
}
}
# else
int64_t threadIdx = 0;
logIdentifier = stString_copy("");
if (currentPercentage != lastReportedPercentage) {
logProgress = TRUE;
lastReportedPercentage = currentPercentage;
}
# endif
// prints percentage complete and estimated time remaining
if (logProgress) {
// log progress
int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime);
int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage));
char *timeDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ?
stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining));
st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64"). Estimated time remaining: %s\n",
currentPercentage, i, bamChunker->chunkCount, timeDescriptor);
free(timeDescriptor);
}
// Get reference string for chunk of alignment
char *chunkReference = getSequenceFromReference(referenceFastaFile, bamChunk->refSeqName,
bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n",
logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
// get VCF string
stList *chunkVcfEntries = getVcfEntriesForRegion(vcfEntries, NULL, bamChunk->refSeqName,
bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd, params);
updateVcfEntriesWithSubstringsAndPositions(chunkVcfEntries, chunkReference, strlen(chunkReference),
FALSE, params);
// Convert bam lines into corresponding reads and alignments
st_logInfo(" %s Parsing input reads from file: %s\n", logIdentifier, bamInFile);
stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
extractReadSubstringsAtVariantPositions(bamChunk, chunkVcfEntries, reads, filteredReads, params->polishParams);
// do downsampling if appropriate
if (params->polishParams->maxDepth > 0) {
// get downsampling structures
stList *maintainedReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
bool didDownsample = downsampleBamChunkReadWithVcfEntrySubstringsViaFullReadLengthLikelihood(
params->polishParams->maxDepth, chunkVcfEntries, reads, maintainedReads, filteredReads);
// we need to destroy the discarded reads and structures
if (didDownsample) {
st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier,
stList_length(reads), stList_length(maintainedReads));
// still has all the old reads, need to not free these
stList_setDestructor(reads, NULL);
stList_destruct(reads);
// and keep the filtered reads
reads = maintainedReads;
}
// no downsampling, we just need to free the (empty) objects
else {
assert(stList_length(maintainedReads) == 0);
stList_destruct(maintainedReads);
}
}
time_t primaryPhasingStart = time(NULL);
// iteratively find bubbles
int64_t bubbleFindingIteration = 0;
BubbleGraph *bg = NULL;
stHash *readsToPSeqs = NULL;
stSet *readsBelongingToHap1 = NULL, *readsBelongingToHap2 = NULL;
stGenomeFragment *gf = NULL;
stReference *ref = NULL;
stList *vcfEntriesToBubbles = NULL;
// Get the bubble graph representation
bg = bubbleGraph_constructFromVCFAndBamChunkReadVcfEntrySubstrings(reads, chunkVcfEntries, params,
&vcfEntriesToBubbles);
// Now make a POA for each of the haplotypes
ref = bubbleGraph_getReference(bg, bamChunk->refSeqName, params);
gf = bubbleGraph_phaseBubbleGraph(bg, ref, reads, params, &readsToPSeqs);
stGenomeFragment_phaseBamChunkReads(gf, readsToPSeqs, reads, &readsBelongingToHap1, &readsBelongingToHap2,
params->phaseParams);
st_logInfo(" %s After phasing, of %i reads got %i reads partitioned into hap1 and %i reads partitioned "
"into hap2 (%i unphased)\n", logIdentifier, (int) stList_length(reads),
(int) stSet_size(readsBelongingToHap1), (int) stSet_size(readsBelongingToHap2),
(int) (stList_length(reads) - stSet_size(readsBelongingToHap1) -
stSet_size(readsBelongingToHap2)));
st_logInfo(" %s Phased primary reads in %d sec\n", logIdentifier, time(NULL) - primaryPhasingStart);
// should included filtered reads in output
// get reads
for (int64_t bcrIdx = 0; bcrIdx < stList_length(reads); bcrIdx++) {
BamChunkRead *bcr = stList_get(reads, bcrIdx);
if (!stSet_search(readsBelongingToHap1, bcr) && !stSet_search(readsBelongingToHap2, bcr)) {
// was filtered in some form
stList_append(filteredReads, bamChunkRead_constructCopy(bcr));
}
}
st_logInfo(" %s Assigning %"PRId64" filtered reads to haplotypes\n", logIdentifier, stList_length(filteredReads));
time_t filteredPhasingStart = time(NULL);
bubbleGraph_partitionFilteredReadsFromVcfEntries(filteredReads, gf, bg, vcfEntriesToBubbles, readsBelongingToHap1,
readsBelongingToHap2, params, logIdentifier);
st_logInfo(" %s Partitioned filtered reads in %d sec.\n", logIdentifier, time(NULL) - filteredPhasingStart);
// Output
outputChunkers_processChunkSequencePhased(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName,
NULL, NULL, reads, readsBelongingToHap1, readsBelongingToHap2, gf,
params);
// save
// only use primary reads (not filteredReads) to track read phasing
updateOriginalVcfEntriesWithBubbleData(bamChunk, reads, bamChunker->readEnumerator, gf, bg,
vcfEntriesToBubbles, readsBelongingToHap1, readsBelongingToHap2, logIdentifier);
// Cleanup
if (chunkVcfEntries != NULL) stList_destruct(chunkVcfEntries);
stSet_destruct(readsBelongingToHap1);
stSet_destruct(readsBelongingToHap2);
bubbleGraph_destruct(bg);
stGenomeFragment_destruct(gf);
stReference_destruct(ref);
stHash_destruct(readsToPSeqs);
stList_destruct(vcfEntriesToBubbles);
free(chunkReference);
// report timing
if (st_getLogLevel() >= info) {
st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n",
logIdentifier, stList_length(reads) + stList_length(filteredReads), (int) (time(NULL) - chunkStartTime));
}
// final post-completion logging cleanup
stList_destruct(reads);
stList_destruct(filteredReads);
free(logIdentifier);
}
// for writing haplotyped chunks
stList *allReadIdsHap1 = stList_construct3(0, free);
stList *allReadIdsHap2 = stList_construct3(0, free);
// for writing vcf
bool *chunkWasSwitched = st_calloc(bamChunker->chunkCount, sizeof(bool));
// merge chunks
time_t mergeStartTime = time(NULL);
st_logCritical("> Starting merge\n");
outputChunkers_stitchAndTrackExtraData(outputChunkers, TRUE, bamChunker->chunkCount, allReadIdsHap1, allReadIdsHap2,
chunkWasSwitched);
time_t mergeEndTime = time(NULL);
char *tds = getTimeDescriptorFromSeconds((int) mergeEndTime - mergeStartTime);
st_logCritical("> Merging took %s\n", tds);
outputChunkers_destruct(outputChunkers);
free(tds);
tds = getTimeDescriptorFromSeconds((int) time(NULL) - mergeEndTime);
st_logCritical("> Merge cleanup took %s\n", tds);
free(tds);
// maybe write final haplotyped bams
if (shouldOutputHaplotaggedBam) {
// logging
time_t hapBamStart = time(NULL);
st_logInfo("> Writing final haplotyped BAMs\n");
// get all reads
stSet *allReadIdsForHaplotypingHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
stSet *allReadIdsForHaplotypingHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
for (int64_t i = 0; i < stList_length(allReadIdsHap1); i++) {
stSet_insert(allReadIdsForHaplotypingHap1, stList_get(allReadIdsHap1, i));
}
for (int64_t i = 0; i < stList_length(allReadIdsHap2); i++) {
stSet_insert(allReadIdsForHaplotypingHap2, stList_get(allReadIdsHap2, i));
}
// write it
writeHaplotaggedBam(bamChunker->bamFile, outputBase, regionStr,
allReadIdsForHaplotypingHap1, allReadIdsForHaplotypingHap2, NULL, params, "");
// loggit
char *hapBamTDS = getTimeDescriptorFromSeconds(time(NULL) - hapBamStart);
st_logCritical("> Wrote haplotyped bams in %s\n", hapBamTDS);
// cleanup
free(hapBamTDS);
stSet_destruct(allReadIdsForHaplotypingHap1);
stSet_destruct(allReadIdsForHaplotypingHap2);
}
// maybe write VCF
if (shouldOutputPhasedVcf) {
// loggit
time_t vcfWriteStart = time(NULL);
char *outputVcfFile = stString_print("%s.phased.vcf", outputBase);
char *outputPhaseSetFile = stString_print("%s.phaseset.bed", outputBase);
st_logCritical("> Writing phased VCF to %s, phaseset info to %s\n", outputVcfFile, outputPhaseSetFile);
// write it
updateHaplotypeSwitchingInVcfEntries(bamChunker, chunkWasSwitched, vcfEntries);
writePhasedVcf(vcfFile, regionStr, outputVcfFile, outputPhaseSetFile, vcfEntries, params);
// loggit
char *phasedVcfTDS = getTimeDescriptorFromSeconds(time(NULL) - vcfWriteStart);
st_logCritical("> Wrote phased VCF in %s\n", phasedVcfTDS);
// cleanup
free(phasedVcfTDS);
free(outputVcfFile);
free(outputPhaseSetFile);
}
// cleanup
free(chunkWasSwitched);
bamChunker_destruct(bamChunker);
params_destruct(params);
if (regionStr != NULL) free(regionStr);
stList_destruct(chunkOrder);
free(vcfFile);
stHash_destruct(vcfEntries);
if (allReadIdsHap1 != NULL) stList_destruct(allReadIdsHap1);
if (allReadIdsHap2 != NULL) stList_destruct(allReadIdsHap2);
free(outputBase);
free(bamInFile);
free(referenceFastaFile);
free(paramsFile);
// log completion
char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime);
st_logCritical("> Finished phasing in %s.\n", timeDescriptor);
free(timeDescriptor);
// while(1); // Use this for testing for memory leaks
return 0;
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT, normalized so that 0 <= result->tv_usec < 1000000.
 *
 * Return 1 if the difference X - Y is negative, otherwise 0.
 *
 * Unlike the classic GNU libc example, this version performs the carry
 * in local 64-bit arithmetic instead of updating *y, so both inputs are
 * left untouched.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Work in a single 64-bit microsecond count: no borrow/carry cases,
     * and no intermediate overflow for any realistic timestamps. */
    long long diff = (long long) (x->tv_sec - y->tv_sec) * 1000000LL
                   + ((long long) x->tv_usec - (long long) y->tv_usec);

    long long sec = diff / 1000000LL;
    long long usec = diff % 1000000LL;
    if (usec < 0)
    {
        /* C division truncates toward zero; normalize so tv_usec >= 0. */
        usec += 1000000LL;
        sec -= 1;
    }

    result->tv_sec = (time_t) sec;
    result->tv_usec = (suseconds_t) usec;

    /* Return 1 if result is negative. */
    return diff < 0;
}
/*
 * Driver for the 3D 25-point (radius-4) variable-coefficient stencil.
 *
 * Usage: ./3d25pt_var Nx Ny Nz Nt
 * The three spatial extents are padded by 8 (four ghost layers per side);
 * the sweep updates the interior [4, N-4) for Nt time steps, double
 * buffering between A[0] and A[1].  Each of TESTS runs is timed and the
 * minimum time is reported via PRINT_RESULTS (see print_utils.h).
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* All four problem sizes are required; the original read them
     * uninitialized when arguments were missing (undefined behavior). */
    if (argc <= 4) {
        fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
        return EXIT_FAILURE;
    }
    Nx = atoi(argv[1]) + 8;   /* +8: four ghost layers on each side */
    Ny = atoi(argv[2]) + 8;
    Nz = atoi(argv[3]) + 8;
    Nt = atoi(argv[4]);

    /* allocate the two (t mod 2) solution buffers */
    double ****A = malloc(sizeof(double ***) * 2);
    for (m = 0; m < 2; m++) {
        A[m] = malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            A[m][i] = malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                A[m][i][j] = malloc(sizeof(double) * Nx);
            }
        }
    }

    /* 13 axis-symmetric coefficient arrays: center + 4 per axis */
    double ****coef = malloc(sizeof(double ***) * 13);
    for (m = 0; m < 13; m++) {
        coef[m] = malloc(sizeof(double **) * Nz);
        for (i = 0; i < Nz; i++) {
            coef[m][i] = malloc(sizeof(double *) * Ny);
            for (j = 0; j < Ny; j++) {
                coef[m][i][j] = malloc(sizeof(double) * Nx);
            }
        }
    }

    /* Tile size information, including extra element to decide the list
     * length.  The list is modified here before source-to-source (PLUTO)
     * transformations consume it. */
    int *tile_size = malloc(sizeof(int));
    tile_size[0] = -1;
    tile_size = realloc((void *) tile_size, sizeof(int) * 5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 4;
    tile_size[3] = 64;
    tile_size[4] = -1;

    /* for timekeeping */
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;

    const int BASE = 1024;

    /* Deterministic pseudo-random initialization.  Loops start at 0 so
     * the ghost cells read by the radius-4 stencil (indices 0..3) are
     * defined; the original started at 1 and left plane 0 uninitialized.
     * A[1] is seeded from A[0] so its ghost cells are defined too. */
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = A[0][i][j][k];
            }
        }
    }
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;   /* reported by PRINT_RESULTS */
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
                            coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
                            coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
                            coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
                            coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
                            coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
                            coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
                            coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
                            coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* The original called an undefined lowercase min(); the intent
         * (keep the best of TESTS runs) is spelled out inline. */
        min_tdiff = (tdiff < min_tdiff) ? tdiff : min_tdiff;
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    (void) ts_return;

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    /* Free allocated arrays, including the top-level pointer arrays and
     * the tile-size list, which the original leaked. */
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for (m = 0; m < 13; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);

    return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#define mm 15
#define npart 4*mm*mm*mm
/*
* Function declarations
*/
void
dfill(int,double,double[],int);
void
domove(int,double[],double[],double[],double);
void
dscal(int,double,double[],int);
void
fcc(double[],int,int,double);
void
forces(int,double[],double[],double,double,double**,int);
double
mkekin(int,double[],double[],double,double);
void
mxwell(double[],int,double,double);
void
prnout(int,double,double,double,double,double,double,int,double);
double
velavg(int,double[],double,double);
double
secnds(void);
/*
* Variable declarations
*/
double epot;
double vir;
double count;
/*
* Main program : Molecular Dynamics simulation.
*/
/* Molecular-dynamics driver: sets up an fcc lattice of npart particles,
 * then runs movemx velocity-Verlet-style steps inside a single OpenMP
 * parallel region.  Serial bookkeeping is funneled through
 * "#pragma omp single" sections; the force evaluation in forces() is
 * presumably a worksharing construct executed by all threads -- TODO
 * confirm against forces.c. */
int main(){
int move;
double x[npart*3], vh[npart*3], f[npart*3];
double ekin;
double vel;
double sc;
double start, time;
int nthreads,t;
double **ftemp;   /* per-thread force accumulators, one array per thread */
/*
* Parameter definitions
*/
double den = 0.83134;
double side = pow((double)npart/den,0.3333333);
double tref = 0.722;
double rcoff = (double)mm/4.0;
double h = 0.064;
int irep = 10;
int istop = 20;
int iprint = 5;
int movemx = 20;
double a = side/(double)mm;
double hsq = h*h;
double hsq2 = hsq*0.5;
double tscale = 16.0/((double)npart-1.0);
double vaver = 1.13*sqrt(tref/24.0);
/*
* Initial output
*/
printf(" Molecular Dynamics Simulation example program\n");
printf(" ---------------------------------------------\n");
printf(" number of particles is ............ %6d\n",npart);
printf(" side length of the box is ......... %13.6f\n",side);
printf(" cut off is ........................ %13.6f\n",rcoff);
printf(" reduced temperature is ............ %13.6f\n",tref);
printf(" basic timestep is ................. %13.6f\n",h);
printf(" temperature scale interval ........ %6d\n",irep);
printf(" stop scaling at move .............. %6d\n",istop);
printf(" print interval .................... %6d\n",iprint);
printf(" total no. of steps ................ %6d\n",movemx);
/*
* Generate fcc lattice for atoms inside box
*/
fcc(x, npart, mm, a);
/*
* Initialise velocities and forces (which are zero in fcc positions)
*/
mxwell(vh, 3*npart, h, tref);
dfill(3*npart, 0.0, f, 1);
/*
* Start of md
*/
printf("\n i ke pe e temp "
" pres vel rp\n ----- ---------- ----------"
" ---------- -------- -------- -------- ----\n");
/* allocate storage for per-thread copies of f */
/* NOTE(review): ftemp is never freed; harmless here since main exits
* right after the timing loop, but worth fixing if this is reused. */
nthreads = omp_get_max_threads();
ftemp = (double **) malloc(nthreads*sizeof(double *));
for (t = 0; t < nthreads; t++){
ftemp[t]=(double *) calloc(npart*3,sizeof(double));
}
start = secnds();
/* One parallel region for the whole run: every thread executes the
* move loop, so `move` must be private.  Correctness relies on the
* implicit barriers at the end of each "single" block to order the
* serial phases against the shared force computation. */
#pragma omp parallel default(shared) private(move)
{
for (move=1; move<=movemx; move++) {
/*
* Move the particles and partially update velocities
*/
/* serial: one thread advances positions; others wait at the
* implicit barrier of the single */
#pragma omp single
{
domove(3*npart, x, vh, f, side);
}
/*
* Compute forces in the new positions and accumulate the virial
* and potential energy.
*/
/* executed by all threads, using ftemp[tid] as scratch */
forces(npart, x, f, side, rcoff, ftemp, nthreads);
/*
* Scale forces, complete update of velocities and compute k.e.
*/
/* serial epilogue of the step: kinetic energy, optional
* temperature rescaling, and periodic reporting */
#pragma omp single
{
ekin=mkekin(npart, f, vh, hsq2, hsq);
/*
* Average the velocity and temperature scale if desired
*/
vel=velavg(npart, vh, vaver, h);
if (move<istop && fmod(move, irep)==0) {
sc=sqrt(tref/(tscale*ekin));
dscal(3*npart, sc, vh, 1);
ekin=tref/tscale;
}
/*
* Sum to get full potential energy and virial
*/
if (fmod(move, iprint)==0)
prnout(move, ekin, epot, tscale, vir, vel, count, npart, den);
}
}
}
time = secnds() - start;
printf("Time = %f\n",(float) time);
}
/*
 * Wall-clock timestamp in seconds, used to time the whole simulation.
 * Delegates to the OpenMP runtime clock.
 */
double secnds(void)
{
  const double wall_seconds = omp_get_wtime();
  return wall_seconds;
}
|
GB_binop__pow_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc64)
// C=scalar+B GB (_bind1st__pow_fc64)
// C=scalar+B' GB (_bind1st_tran__pow_fc64)
// C=A+scalar GB (_bind2nd__pow_fc64)
// C=A'+scalar GB (_bind2nd_tran__pow_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_cpow (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_cpow (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FC64 || GxB_NO_POW_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled: POW is not in that list, so no dense C+=A+B kernel is
// generated for it and the generic path is used instead.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop lives in the
// shared template, specialized via the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, with
// the work pre-sliced into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed untyped via p_bwork) into the
// dense matrix C.
GrB_Info GB (_Cdense_accumb__pow_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above always returns; kept
// byte-identical because this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no C=A*D column-scale kernel is generated for POW
// (see the "(none)" entry in the kernel list above).
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no C=D*B row-scale kernel is generated for POW
// (see the "(none)" entry in the kernel list above).
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (optionally masked), with the task decomposition
// precomputed by the caller; slicing workspace is declared here and
// released by GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__pow_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (method 08) where C is sparse or hypersparse;
// all logic is in the included meta-template.
GrB_Info GB (_AemultB_08__pow_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (method 02): A sparse/hyper, B bitmap/full.  Since
// pow(x,y) is not commutative (GB_BINOP_FLIP is 1), a flipped variant
// of the template is instantiated for the flipxy case.
GrB_Info GB (_AemultB_02__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M>=A.*B (method 04): M sparse/hyper, A and B bitmap/full;
// the mask slicing drives the parallel work.
GrB_Info GB (_AemultB_04__pow_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked) where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__pow_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Applies z = pow (x, bij) to every entry present in B, writing the result
// into Cx.  Entries absent from the bitmap Bb are skipped.

GrB_Info GB (_bind1st__pow_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Bz = (GxB_FC64_t *) Bx_input ;
    GxB_FC64_t xscalar = (*((GxB_FC64_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Bb, p))
        {
            GxB_FC64_t bval = GBX (Bz, p, false) ;
            Cz [p] = GB_cpow (xscalar, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Applies z = pow (aij, y) to every entry present in A, writing the result
// into Cx.  Entries absent from the bitmap Ab are skipped.

GrB_Info GB (_bind2nd__pow_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cz = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Az = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t yscalar = (*((GxB_FC64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap are computed
        if (GBB (Ab, p))
        {
            GxB_FC64_t aval = GBX (Az, p, false) ;
            Cz [p] = GB_cpow (aval, yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    GxB_FC64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_cpow (x, aij) ; \
}

// Generated kernel: C = A' with z = pow (x, aij) applied during the
// transpose.  The transpose loop is in GB_unop_transpose.c, which uses the
// GB_CAST_OP macro defined just above.

GrB_Info GB (_bind1st_tran__pow_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,         // scalar x, bound as the 1st operand
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent kernels in this translation unit
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    GxB_FC64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_cpow (aij, y) ; \
}

// Generated kernel: C = A' with z = pow (aij, y) applied during the
// transpose.  The transpose loop is in GB_unop_transpose.c, which uses the
// GB_CAST_OP macro defined just above.

GrB_Info GB (_bind2nd_tran__pow_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,         // scalar y, bound as the 2nd operand
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
McULTRABuilder.h | #pragma once
#include <algorithm>
#include "../../../DataStructures/TripBased/Data.h"
#include "../../../DataStructures/RAPTOR/Data.h"
#include "../../../Helpers/MultiThreading.h"
#include "../../../Helpers/Timer.h"
#include "../../../Helpers/Console/Progress.h"
#include "McShortcutSearch.h"
namespace TripBased {
template<bool DEBUG = false, bool USE_TIEBREAKING_KEY = true>
class McULTRABuilder {
public:
inline static constexpr bool Debug = DEBUG;
inline static constexpr bool UseTiebreakingKey = USE_TIEBREAKING_KEY;
using Type = McULTRABuilder<Debug, UseTiebreakingKey>;
public:
McULTRABuilder(const Data& data) :
data(data) {
stopEventGraph.addVertices(data.numberOfStopEvents());
}
void computeShortcuts(const ThreadPinning& threadPinning, const int intermediateWitnessTransferLimit = 0, const int finalWitnessTransferLimit = 0, const int minDepartureTime = -never, const int maxDepartureTime = never, const bool verbose = true) noexcept {
if (verbose) std::cout << "Computing shortcuts with " << threadPinning.numberOfThreads << " threads." << std::endl;
std::vector<Shortcut> shortcuts;
Progress progress(data.numberOfStops(), verbose);
omp_set_num_threads(threadPinning.numberOfThreads);
#pragma omp parallel
{
threadPinning.pinThread();
McShortcutSearch<Debug, UseTiebreakingKey> shortcutSearch(data, intermediateWitnessTransferLimit, finalWitnessTransferLimit);
#pragma omp for schedule(dynamic)
for (size_t i = 0; i < data.numberOfStops(); i++) {
shortcutSearch.run(StopId(i), minDepartureTime, maxDepartureTime);
progress++;
}
#pragma omp critical
{
const std::vector<Shortcut>& localShortcuts = shortcutSearch.getShortcuts();
for (const Shortcut& shortcut : localShortcuts) {
shortcuts.emplace_back(shortcut);
}
}
}
std::sort(shortcuts.begin(), shortcuts.end(), [](const Shortcut& a, const Shortcut& b){
return (a.origin < b.origin) || ((a.origin == b.origin) && (a.destination < b.destination));
});
stopEventGraph.addEdge(Vertex(shortcuts[0].origin), Vertex(shortcuts[0].destination)).set(TravelTime, shortcuts[0].walkingDistance);
for (size_t i = 1; i < shortcuts.size(); i++) {
if ((shortcuts[i].origin == shortcuts[i - 1].origin) && (shortcuts[i].destination == shortcuts[i - 1].destination)) continue;
stopEventGraph.addEdge(Vertex(shortcuts[i].origin), Vertex(shortcuts[i].destination)).set(TravelTime, shortcuts[i].walkingDistance);
}
stopEventGraph.sortEdges(ToVertex);
progress.finished();
}
inline const DynamicTransferGraph& getStopEventGraph() const noexcept {
return stopEventGraph;
}
inline DynamicTransferGraph& getStopEventGraph() noexcept {
return stopEventGraph;
}
private:
const Data& data;
DynamicTransferGraph stopEventGraph;
};
}
|
taskwait-depend.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// The GOMP wrapper does not handle `task if(0) depend()` and drops the
// dependency. Once this is fixed, reevaluate the GCC status:
// XFAIL: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8, gcc-9, gcc-10
#include "callback.h"
#include <omp.h>
// OMPT test driver: creates one task with an 'out' dependence on x, then an
// undeferred (if(0)) task with an 'in' dependence, which acts as a taskwait
// with a dependence.  The FileCheck lines below this function pin the exact
// sequence of OMPT callbacks, so the code shape must not change.
int main() {
  int x = 0;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      print_ids(0);
      printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
             &x);
      // first task: writes x, registers an 'out' dependence
#pragma omp task depend(out : x)
      { x++; }
      print_fuzzy_address(1);
      //#pragma omp taskwait depend(in: x) <-- currently not supported in clang
      // stand-in for the unsupported taskwait-with-depend: an undeferred task
#pragma omp task if (0) depend(in : x)
      {}
      print_fuzzy_address(2);
    }
  }
  return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred|
// CHECK-SAME: ompt_task_mergeable=1207959556, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <assert.h>

#include <algorithm>
#include <cinttypes>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <type_traits>

#include "pvector.h"
#include "util.h"
using namespace std;
#define debug 0
#define BLOCK_SIZE 511
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
// Holds a destination node and an edge weight; paired with a source node it
// forms a weighted edge.
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;   // destination of this edge in the graph
  WeightT_ w;  // weight of the edge
  uint64_t t;  // timestamp when this edge was inserted

  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1), t(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w), t(1) {}
  NodeWeight(NodeID_ v, WeightT_ w, uint64_t t) : v(v), w(w), t(t) {}

  // Order by destination first, then by weight (timestamp is ignored).
  bool operator< (const NodeWeight& other) const {
    if (v != other.v)
      return v < other.v;
    return w < other.w;
  }

  // Equality ignores the weight; needed to remove duplicate edges.
  bool operator== (const NodeWeight& other) const {
    return v == other.v;
  }

  // Equality against a bare node id; needed to remove self edges.
  bool operator== (const NodeID_& other) const {
    return v == other;
  }

  // Implicit conversion to the destination id.
  operator NodeID_() {
    return v;
  }
};
// Streams a NodeWeight as "v w" (the timestamp is not printed).
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  return os << nw.v << " " << nw.w;
}
// Reads a NodeWeight as "v w" (the timestamp is left untouched).
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  return is >> nw.v >> nw.w;
}
// Syntatic sugar for an edge
// Syntactic sugar for an edge from u to v.
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;  // source endpoint
  DstT v;  // destination endpoint

  EdgePair() {}
  EdgePair(SrcT src, DstT dst) : u(src), v(dst) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;

typedef int32_t NodeID;
typedef int32_t WeightT;

// structure for the vertices: head/tail are edge_block pointers stored as
// raw uint64_t integers
struct vertex_element {
  uint64_t head;    // pointer to the first edge_block of this vertex
  uint64_t tail;    // pointer to the last edge_block of this vertex
  uint32_t degree;  // number of edges stored for this vertex
};

// blocks of edges: a singly linked list of fixed-size edge-list segments
struct edge_block {
  struct NodeWeight<NodeID, WeightT> block[BLOCK_SIZE];  // edge-list segment
  uint64_t next;  // pointer to the next edge_block (0 if none) — the original
                  // comment ("timestamp when this edge inserted") was a
                  // copy-paste error; every write to this field stores an
                  // edge_block pointer
};
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
// Container for a graph stored as per-vertex linked lists of fixed-size
// edge blocks (despite the name, this is not a classic CSR layout).
// Intended to be constructed by a Builder.
class CSRGraph {
  // Used for *non-negative* offsets within a neighborhood
  typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT;

  typedef EdgePair<NodeID_, DestID_> Edge;
  typedef pvector<Edge> EdgeList;

  // Used to access neighbors of vertex, basically sugar for iterators
  class Neighborhood {
    struct edge_block *curr_edge_block_;
    uint32_t degree_, curr_idx_;
    DestID_ *begin_ptr_;
    DestID_ *end_ptr_;

   public:
    // NOTE(review): a start_offset >= BLOCK_SIZE would index past the first
    // block; callers in this file only pass offset 0 — confirm before using
    // larger offsets.
    Neighborhood(struct edge_block *curr_edge_block, OffsetT start_offset, uint32_t degree) :
        curr_edge_block_(curr_edge_block), degree_(degree), curr_idx_(start_offset) {
      if (start_offset >= degree) begin_ptr_ = nullptr;  // empty neighborhood
      else begin_ptr_ = &(curr_edge_block_->block[start_offset]);
      end_ptr_ = nullptr;  // end() is represented by a null element pointer
    }

    class iterator {
     public:
      struct edge_block *curr_edge_block_;
      uint32_t curr_idx_, degree_;

      iterator() {
        g_index_ = nullptr;
        curr_edge_block_ = nullptr;
        curr_idx_ = 0;
        degree_ = 0;
      }
      iterator(DestID_ *g_index) {
        g_index_ = g_index;
        curr_edge_block_ = nullptr;
        curr_idx_ = 0;
        degree_ = 0;
      }
      iterator(DestID_ *g_index, struct edge_block *curr_edge_block, uint32_t curr_idx, uint32_t degree) {
        g_index_ = g_index;
        curr_edge_block_ = curr_edge_block;
        curr_idx_ = curr_idx;
        degree_ = degree;
      }

      // Advance to the next edge, following the block chain at block
      // boundaries; becomes the end iterator (null) after the last edge.
      iterator &operator++() {
        curr_idx_ += 1;
        if (curr_idx_ == degree_) g_index_ = nullptr;
        else {
          if (curr_idx_ % BLOCK_SIZE == 0) curr_edge_block_ = (struct edge_block *) curr_edge_block_->next;
          g_index_ = &(curr_edge_block_->block[curr_idx_ % BLOCK_SIZE]);
        }
        return *this;
      }

      operator DestID_ *() const {
        return g_index_;
      }
      DestID_ *operator->() {
        return g_index_;
      }
      DestID_ &operator*() {
        return (*g_index_);
      }
      bool operator==(const iterator &rhs) const {
        return g_index_ == rhs.g_index_;
      }
      bool operator!=(const iterator &rhs) const {
        return (g_index_ != rhs.g_index_);
      }

     private:
      DestID_ *g_index_;  // pointer to the current edge element, null at end
    };

    iterator begin() { return iterator(begin_ptr_, curr_edge_block_, curr_idx_, degree_); }
    iterator end()   { return iterator(end_ptr_); }
  };

  // Frees every edge block of every vertex, then the vertex array itself.
  // Fix: all of this storage comes from malloc/calloc, so it must be
  // released with free(); the original used delete[], which is undefined
  // behavior for malloc'd memory.
  void ReleaseResources() {
    for (NodeID_ i = 0; i < num_nodes_; i += 1) {
      struct edge_block *head = (struct edge_block *) vertices_[i].head;
      while (head != nullptr) {
        struct edge_block *tmp = head;
        head = (struct edge_block *) head->next;
        free(tmp);
      }
    }
    free(vertices_);
  }

 public:
  CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1), vertices_(nullptr) {}

  CSRGraph(CSRGraph&& other) : directed_(other.directed_),
      num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
      vertices_(other.vertices_) {
    other.num_edges_ = -1;
    other.num_nodes_ = -1;
    other.vertices_ = nullptr;
  }

  ~CSRGraph() {
    ReleaseResources();
  }

  CSRGraph& operator=(CSRGraph&& other) {
    if (this != &other) {
      ReleaseResources();
      directed_ = other.directed_;
      num_edges_ = other.num_edges_;
      num_nodes_ = other.num_nodes_;
      vertices_ = other.vertices_;
      other.num_edges_ = -1;
      other.num_nodes_ = -1;
      other.vertices_ = nullptr;
    }
    return *this;
  }

  // Builds the per-vertex edge-block lists from a base edge list.  Edges are
  // appended in input order; a new block is chained whenever the degree
  // crosses a BLOCK_SIZE boundary.
  CSRGraph(EdgeList &base_edge_list, bool is_directed, uint64_t n_edges, uint64_t n_vertices) {
    num_edges_ = n_edges;
    num_nodes_ = n_vertices;
    directed_ = is_directed;
    vertices_ = (struct vertex_element *) calloc(num_nodes_, sizeof(struct vertex_element));

    uint32_t t_src;
    // Fix: loop index widened to int64_t to match num_edges_ (the original
    // int could overflow for > 2^31 edges).
    for (int64_t i = 0; i < num_edges_; i++)
    {
      t_src = base_edge_list[i].u;
      if (vertices_[t_src].degree == 0) {
        // initialize a new edge-list segment and update head/tail in the vertex structure
        struct edge_block *curr_block = (struct edge_block *) malloc(sizeof(struct edge_block));
        curr_block->next = 0;

        int32_t curr_idx = 0;
        curr_block->block[curr_idx].v = base_edge_list[i].v.v;
        curr_block->block[curr_idx].w = base_edge_list[i].v.w;
        curr_block->block[curr_idx].t = base_edge_list[i].v.t;

        vertices_[t_src].head = (uint64_t) curr_block;
        vertices_[t_src].tail = (uint64_t) curr_block;
      }
      else {
        if (vertices_[t_src].degree % BLOCK_SIZE == 0) {
          // it's time to create a new segment
          struct edge_block *curr_block = (struct edge_block *) malloc(sizeof(struct edge_block));
          curr_block->next = 0;

          int32_t curr_idx = 0;
          curr_block->block[curr_idx].v = base_edge_list[i].v.v;
          curr_block->block[curr_idx].w = base_edge_list[i].v.w;
          curr_block->block[curr_idx].t = base_edge_list[i].v.t;

          // linking current-block at the next pointer of the current tail
          ((struct edge_block *) vertices_[t_src].tail)->next = (uint64_t) curr_block;
          // update tail segment
          vertices_[t_src].tail = (uint64_t) curr_block;
        }
        else {
          // we have enough space in current segment
          struct edge_block *curr_block = (struct edge_block *) vertices_[t_src].tail;
          int32_t curr_idx = vertices_[t_src].degree % BLOCK_SIZE;
          curr_block->block[curr_idx].v = base_edge_list[i].v.v;
          curr_block->block[curr_idx].w = base_edge_list[i].v.w;
          curr_block->block[curr_idx].t = base_edge_list[i].v.t;
        }
      }
      vertices_[t_src].degree += 1;
    }
  }

  // Appends edge (src -> dst) with the given weight; the current edge count
  // is recorded as the edge's timestamp.
  void insert(uint32_t src, uint32_t dst, uint32_t value)
  {
    if (debug) printf("[insert(%u, %u)] Called!\n", src, dst);

    if (vertices_[src].degree == 0) {
      // initialize a new edge-list segment and update head/tail in the vertex structure
      struct edge_block *curr_block = (struct edge_block *) malloc(sizeof(struct edge_block));
      curr_block->next = 0;

      int32_t curr_idx = 0;
      curr_block->block[curr_idx].v = dst;
      curr_block->block[curr_idx].w = value;
      curr_block->block[curr_idx].t = num_edges_;

      vertices_[src].head = (uint64_t) curr_block;
      vertices_[src].tail = (uint64_t) curr_block;
    }
    else {
      if (vertices_[src].degree % BLOCK_SIZE == 0) {
        // it's time to create a new segment
        struct edge_block *curr_block = (struct edge_block *) malloc(sizeof(struct edge_block));
        curr_block->next = 0;

        int32_t curr_idx = 0;
        curr_block->block[curr_idx].v = dst;
        curr_block->block[curr_idx].w = value;
        curr_block->block[curr_idx].t = num_edges_;

        // linking current-block at the next pointer of the current tail
        ((struct edge_block *) vertices_[src].tail)->next = (uint64_t) curr_block;
        // update tail segment
        vertices_[src].tail = (uint64_t) curr_block;
      }
      else {
        // we have enough space in current segment
        struct edge_block *curr_block = (struct edge_block *) vertices_[src].tail;
        int32_t curr_idx = vertices_[src].degree % BLOCK_SIZE;
        curr_block->block[curr_idx].v = dst;
        curr_block->block[curr_idx].w = value;
        curr_block->block[curr_idx].t = num_edges_;
      }
    }
    vertices_[src].degree += 1;
    num_edges_ += 1;
  }

  bool directed() const {
    return directed_;
  }

  int64_t num_nodes() const {
    return num_nodes_;
  }

  int64_t num_edges() const {
    return num_edges_;
  }

  int64_t num_edges_directed() const {
    return directed_ ? num_edges_ : 2 * num_edges_;
  }

  int64_t out_degree(NodeID_ v) const {
    return vertices_[v].degree;
  }

  // In this structure the inverse is not stored separately, so in_degree
  // simply mirrors out_degree.
  int64_t in_degree(NodeID_ v) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return vertices_[v].degree;
  }

  Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    return Neighborhood((struct edge_block *) vertices_[n].head, start_offset, vertices_[n].degree);
  }

  Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const {
    static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
    return Neighborhood((struct edge_block *) vertices_[n].head, start_offset, vertices_[n].degree);
  }

  void PrintStats() const {
    std::cout << "Graph has " << num_nodes_ << " nodes and "
              << num_edges_ << " ";
    if (!directed_)
      std::cout << "un";
    std::cout << "directed edges for degree: ";
    std::cout << num_edges_ / num_nodes_ << std::endl;
  }

  void PrintTopology() const {
    for (NodeID_ i = 0; i < num_nodes_; i++) {
      std::cout << i << ": ";
      for (DestID_ j : out_neigh(i)) {
        std::cout << j << " ";
      }
      std::cout << std::endl;
    }
  }

  // Prints the neighborhood of src twice: once by walking the raw block
  // chain, once through the Neighborhood iterator (useful for debugging).
  void PrintTopology(NodeID_ src) const {
    uint32_t j = 0;
    uint64_t curr_ptr = vertices_[src].head;

    std::cout << src << "(" << vertices_[src].degree << "): ";
    while (curr_ptr) {
      struct edge_block *curr_edge_block = (struct edge_block *) curr_ptr;
      cout << curr_edge_block->block[j % BLOCK_SIZE].v << " ";
      j += 1;
      if (j == vertices_[src].degree) break;
      if (j % BLOCK_SIZE == 0) curr_ptr = curr_edge_block->next;
    }
    cout << endl;

    std::cout << src << "(" << out_degree(src) << "): ";
    for (DestID_ j : out_neigh(src)) {
      std::cout << j.v << " ";
    }
    std::cout << std::endl << std::endl;
  }

  static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
    NodeID_ length = offsets.size();
    DestID_** index = new DestID_*[length];
    #pragma omp parallel for
    for (NodeID_ n = 0; n < length; n++)
      index[n] = neighs + offsets[n];
    return index;
  }

  pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
    // note: keeping this for dummy purposes only — returns zeroed offsets
    pvector<SGOffset> offsets(num_nodes_ + 1);
    return offsets;
  }

  Range<NodeID_> vertices() const {
    return Range<NodeID_>(num_nodes());
  }

 private:
  bool directed_;
  int64_t num_nodes_;
  int64_t num_edges_;
  struct vertex_element *vertices_;  // underlying storage for vertex list
};
#endif // GRAPH_H_
|
nstream-ua-target.c | ///
/// Copyright (c) 2019, Intel Corporation
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// * Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// * Redistributions in binary form must reproduce the above
/// copyright notice, this list of conditions and the following
/// disclaimer in the documentation and/or other materials provided
/// with the distribution.
/// * Neither the name of Intel Corporation nor the names of its
/// contributors may be used to endorse or promote products
/// derived from this software without specific prior written
/// permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
/// POSSIBILITY OF SUCH DAMAGE.
//////////////////////////////////////////////////////////////////////
///
/// NAME: nstream
///
/// PURPOSE: To compute memory bandwidth when adding a vector of a given
/// number of double precision values to the scalar multiple of
/// another vector of the same length, and storing the result in
/// a third vector.
///
/// USAGE: The program takes as input the number
/// of iterations to loop over the triad vectors, the length of the
/// vectors, and the offset between vectors
///
/// <progname> <# iterations> <vector length> <offset>
///
/// The output consists of diagnostics to make sure the
/// algorithm worked, and of timing statistics.
///
/// NOTES: Bandwidth is determined as the number of words read, plus the
/// number of words written, times the size of the words, divided
/// by the execution time. For a vector length of N, the total
/// number of words read and written is 4*N*sizeof(double).
///
///
/// HISTORY: This code is loosely based on the Stream benchmark by John
/// McCalpin, but does not follow all the Stream rules. Hence,
/// reported results should not be associated with Stream in
/// external publications
///
/// Converted to C++11 by Jeff Hammond, November 2017.
/// Converted to C11 by Jeff Hammond, February 2019.
///
//////////////////////////////////////////////////////////////////////
#pragma omp requires unified_address
#include "prk_util.h"
#include "prk_openmp.h"
// STREAM-style triad benchmark A += B + scalar*C, offloaded to an OpenMP
// target device.  Memory is allocated on the host with omp_target_alloc and
// dereferenced inside the target region — presumably relying on the
// `#pragma omp requires unified_address` above; TODO confirm the target
// compiler honors this for host pointers in device code.
int main(int argc, char * argv[])
{
  printf("Parallel Research Kernels version %d\n", PRKVERSION );
  printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n");

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  if (argc < 3) {
    printf("Usage: <# iterations> <vector length>\n");
    return 1;
  }

  int iterations = atoi(argv[1]);
  if (iterations < 1) {
    printf("ERROR: iterations must be >= 1\n");
    return 1;
  }

  // length of a the vector
  size_t length = atol(argv[2]);
  // NOTE(review): length is size_t (unsigned), so this only rejects 0; a
  // negative command-line value wraps to a huge number before this check.
  if (length <= 0) {
    printf("ERROR: Vector length must be greater than 0\n");
    return 1;
  }

  // optional third argument selects the OpenMP device
  int device = (argc > 3) ? atol(argv[3]) : omp_get_default_device();
  if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_default_device()) ) {
    printf("ERROR: device number %d is not valid.\n", device);
    return 1;
  }

  printf("Number of iterations = %d\n", iterations);
  printf("Vector length        = %zu\n", length);
  printf("OpenMP Device        = %d\n", device);

  //////////////////////////////////////////////////////////////////////
  // Allocate space and perform the computation
  //////////////////////////////////////////////////////////////////////

  double nstream_time = 0.0;

  int host = omp_get_initial_device();
  size_t bytes = length*sizeof(double);
  double * restrict A = omp_target_alloc(bytes, host);
  double * restrict B = omp_target_alloc(bytes, host);
  double * restrict C = omp_target_alloc(bytes, host);

  double scalar = 3.0;

  // initialize on the host
  #pragma omp parallel for simd schedule(static)
  for (size_t i=0; i<length; i++) {
      A[i] = 0.0;
      B[i] = 2.0;
      C[i] = 2.0;
  }

  {
    // iteration 0 is a warm-up; timing starts at iteration 1
    for (int iter = 0; iter<=iterations; iter++) {
      if (iter==1) nstream_time = prk_wtime();
      #pragma omp target teams distribute parallel for simd \
                         schedule(static) device(device)
      for (size_t i=0; i<length; i++) {
          A[i] += B[i] + scalar * C[i];
      }
    }
    nstream_time = prk_wtime() - nstream_time;
  }

  omp_target_free(C, host);
  omp_target_free(B, host);

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  // replay the iterations on scalars to compute the expected checksum
  // (iterations+1 updates, matching the <= loop above)
  double ar = 0.0;
  double br = 2.0;
  double cr = 2.0;
  for (int i=0; i<=iterations; i++) {
      ar += br + scalar * cr;
  }
  ar *= length;

  double asum = 0.0;
  #pragma omp parallel for reduction(+:asum)
  for (size_t i=0; i<length; i++) {
      asum += fabs(A[i]);
  }

  omp_target_free(A, host);

  double epsilon=1.e-8;
  if (fabs(ar-asum)/asum > epsilon) {
      printf("Failed Validation on output array\n"
             "       Expected checksum: %lf\n"
             "       Observed checksum: %lf\n"
             "ERROR: solution did not validate\n", ar, asum);
      return 1;
  } else {
      printf("Solution validates\n");
      double avgtime = nstream_time/iterations;
      double nbytes = 4.0 * length * sizeof(double);
      printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime);
  }

  return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT.  Returns 1 if the difference is negative, otherwise 0.
 * Note: *y may be modified while normalizing for the carry (same contract
 * as the classic glibc example this follows).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize y so that x->tv_usec - y->tv_usec lies in [0, 1000000]. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }

    /* tv_usec is now certainly non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* 1 if the overall difference is negative. */
    return x->tv_sec < y->tv_sec;
}
// Driver for the order-2, 3D 25-point stencil: allocates the grids, runs the
// stencil TESTS times, and reports the best wall-clock time.
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    // Fix: default every dimension so the sizes are always initialized;
    // the original read Nx..Nz (argc <= 3) or Nt (argc <= 4) uninitialized,
    // which is undefined behavior.  The defaults give a valid, empty-ish run.
    int Nx = 8, Ny = 8, Nz = 8, Nt = 0;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;   // +8 accounts for the 4-cell halo on each side
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // two time planes A[0]/A[1] plus the coefficient grid roc2
    // Fix: the original allocated roc2 twice, leaking the first allocation.
    double ****A = (double ****) malloc(sizeof(double***)*2);
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 32;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    // initialize variables with a fixed seed for reproducibility
    // NOTE(review): indices start at 1, so plane/row/column 0 is left
    // uninitialized — the stencil only reads indices >= 4-4 = 0 via halo;
    // confirm this matches the original benchmark's intent.
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    // 25-point stencil coefficients (center + 4 shells of 6 neighbors each)
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        // leapfrog update: new plane from current and previous
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);          // fix: the outer pointer array was never freed
    free(tile_size);  // fix: the tile-size list was never freed
    return 0;
}
|
jacobi-v3.c | #include <stdio.h>
#include <math.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
/* Returns the current wall-clock time in seconds (microsecond resolution). */
double time_stamp()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + 1.0e-6 * (double) now.tv_usec;
}
// wall-clock timestamps for optional timing (not read in the code visible here)
double time1, time2;

// forward declarations for the solver routines defined below
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This C version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 500
int n,m,mits;
double tol,relax=1.0,alpha=0.0543;
double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
double dx,dy;
unsigned long int chiterations = 0;
unsigned long int chloads = 0;
unsigned long int chstores = 0;
unsigned long int chflops = 0;
/* Solver entry point: configures the fixed problem size and solver
   parameters, reports the OpenMP thread count, runs the driver, and then
   prints and sanity-checks the instrumentation counters.
   Fix: the printf label read "chostores" while printing the chstores
   counter; corrected to "chstores". */
int main (void)
{
  float toler;  /* only used by the commented-out interactive input below */
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
  n = MSIZE;
  m = MSIZE;
  tol = 0.0000000001;
  mits = 5000;
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n", omp_get_num_threads());
  }
#endif
  driver();
  printf("chloads =%lu chstores =%lu, chflops=%lu\n", chloads, chstores, chflops);
  /* Regression check on the expected flop count for this fixed problem size. */
  assert(chflops == 16120260000);
  return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
/* Orchestrates one solver run: initialize the grid and RHS, time the
   Jacobi iteration, report the elapsed time, then check the solution
   against the exact answer. */
void driver( )
{
  initialize();

  time1 = time_stamp();
  jacobi();                 /* solve the Helmholtz equation */
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n", time2 - time1);

  error_check();            /* compare against the exact solution */
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Sets up the grid spacing, zeroes the solution array u, and fills the
   right-hand side f for the Helmholtz problem.  The three passes repeat
   overlapping work; only the final pass determines the end state (the
   #pragma aitool lines presumably drive an operation-counting tool). */
void initialize( )
{
int i,j, xx,yy;
//double PI=3.1415926;
/* Grid spacing for the [-1,1] x [-1,1] domain. */
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
//chiterations = n*m;
/* Pass 1: zero u; xx/yy are computed but not used in this pass. */
#pragma aitool fp_plus(2) fp_multiply(2)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
}
/* NOTE(review): this pass reads xx/yy left over from the last iteration
   of the loop above, so every f[i][j] receives the same value here.  The
   third pass recomputes xx/yy per point and overwrites both u and f, so
   the final state is unaffected -- presumably these passes exist only for
   the aitool counters; confirm before simplifying. */
//chiterations = n*m;
#pragma aitool fp_minus(6) fp_multiply(5)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
/* Pass 3: the authoritative initialization of both u and f. */
//chiterations = n*m;
#pragma aitool fp_plus(2) fp_minus(6) fp_multiply(7)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Jacobi iteration for the Helmholtz equation on the global u/f grids.
   Repeats until the residual norm drops below tol or mits iterations are
   reached.  Reads globals n, m, dx, dy, alpha, relax, tol, mits; updates
   u (solution), uold (previous iterate), and chiterations (counter). */
void jacobi( )
{
double omega;
int i,j,k;
double error,resid,ax,ay,b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega=relax;
/*
* Initialize coefficients */
ax = 1.0/(dx*dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y-direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
/* Seed error above tol so the loop body executes at least once. */
error = 10.0 * tol;
k = 1;
while ((k<=mits)&&(error>tol))
{
error = 0.0;
/* Copy new solution into old */
{
chiterations = n*m;
for(i=0;i<n;i++)
for(j=0;j<m;j++)
uold[i][j] = u[i][j];
/* Interior sweep: compute the scaled residual at each point, relax u
   toward it, and accumulate the squared residual into error. */
chiterations = (n-2)*(m-2);
#pragma aitool fp_plus(5) fp_minus(2) fp_multiply(5) fp_divide(1)
for (i=1;i<(n-1);i++)
for (j=1;j<(m-1);j++)
{
resid = (ax*(uold[i-1][j] + uold[i+1][j])\
+ ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid*resid ;
}
}
/* omp end parallel */
/* Error check */
k = k + 1;
if (k%500==0)
printf("Finished %d iteration.\n",k);
/* Normalize the accumulated squared residual by the grid size. */
error = sqrt(error)/(n*m);
} /* End iteration loop */
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n", error);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
int i,j;
double xx,yy,temp,error;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
error = 0.0 ;
//chiterations = n*m;
#pragma aitool fp_plus(3) fp_minus(3) fp_multiply(6)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
error = error + temp*temp;
}
error = sqrt(error)/(n*m);
printf("Solution Error :%E \n",error);
}
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  /* Allocate the matrix header and reset all of its fields. */
  gk_csr_t *mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  /* Zero every field, then flag the dimensions as not-yet-set. */
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = -1;
  mat->ncols = -1;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  /* Tolerate an already-freed matrix; otherwise release the contents
     and then the structure itself, NULLing the caller's pointer. */
  if (*mat != NULL) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
/* Releases every array owned by the matrix and NULLs the corresponding
   fields (gk_free takes the addresses of the pointers); the gk_csr_t
   structure itself is left intact.
   Fix: the first argument was cast to (void *) instead of (void **),
   inconsistent with every other gk_free call site in this file. */
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void **)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
/* Produces a deep copy of MAT: each row- and column-based array that is
   present is duplicated; fields that are NULL stay NULL in the copy. */
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *dup = gk_csr_Create();

  dup->nrows = mat->nrows;
  dup->ncols = mat->ncols;

  /* duplicate the row-based structure */
  if (mat->rowptr)
    dup->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                           gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    dup->rowids = gk_icopy(mat->nrows, mat->rowids,
                           gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    dup->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                           gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  if (mat->rowind)
    dup->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                           gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    dup->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                           gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* duplicate the column-based structure */
  if (mat->colptr)
    dup->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                           gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    dup->colids = gk_icopy(mat->ncols, mat->colids,
                           gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    dup->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                           gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    dup->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                           gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    dup->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                           gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return dup;
}
/*************************************************************************/
/*! Returns a submatrix containint a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
ssize_t i;
gk_csr_t *nmat;
/* Reject out-of-range requests. */
if (rstart+nrows > mat->nrows)
return NULL;
nmat = gk_csr_Create();
nmat->nrows = nrows;
nmat->ncols = mat->ncols;
/* copy the row structure */
if (mat->rowptr)
nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
/* Re-base the copied pointers so the submatrix starts at offset 0; the
   loop runs downward so rowptr[0] is subtracted from itself last.
   NOTE(review): this loop dereferences nmat->rowptr even when
   mat->rowptr was NULL -- looks like a row structure is assumed to
   always exist; confirm against callers. */
for (i=nrows; i>=0; i--)
nmat->rowptr[i] -= nmat->rowptr[0];
ASSERT(nmat->rowptr[0] == 0);
if (mat->rowids)
nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
if (mat->rnorms)
nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
if (mat->rsums)
nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));
ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
/* Copy the nonzero indices/values of the selected row window. */
if (mat->rowind)
nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
mat->rowind+mat->rowptr[rstart],
gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
"gk_csr_ExtractSubmatrix: rowind"));
if (mat->rowval)
nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
mat->rowval+mat->rowptr[rstart],
gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
"gk_csr_ExtractSubmatrix: rowval"));
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
ssize_t i, ii, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = nrows;
nmat->ncols = mat->ncols;
/* First pass: count the nonzeros of the selected rows to size the
   output arrays. */
for (nnz=0, i=0; i<nrows; i++)
nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
/* Second pass: copy each selected row's indices/values in the order
   given by rind, building the new rowptr as we go. */
nmat->rowptr[0] = 0;
for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
i = rind[ii];
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
ASSERT(j == nmat->nrows);
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
ssize_t i, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = 0;
nmat->ncols = mat->ncols;
/* First pass: count the rows and nonzeros belonging to partition pid. */
for (nnz=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
nmat->nrows++;
nnz += mat->rowptr[i+1]-mat->rowptr[i];
}
}
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
/* Second pass: copy the matching rows in their original relative order,
   building the new rowptr incrementally. */
nmat->rowptr[0] = 0;
for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
}
ASSERT(j == nmat->nrows);
return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
as many parts as the number of colors. For meaningfull results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
ssize_t i, j;
int nrows, ncolors;
ssize_t *rowptr;
int *rowind;
float *rowval;
gk_csr_t **smats;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
/* Colors are assumed to be numbered 0..max; one output matrix per color. */
ncolors = gk_imax(rowptr[nrows], color)+1;
smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
for (i=0; i<ncolors; i++) {
smats[i] = gk_csr_Create();
smats[i]->nrows = mat->nrows;
smats[i]->ncols = mat->ncols;
/* zero-initialized; used first as per-row counters */
smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
}
/* Pass 1: count, per color, the nonzeros in each row. */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
smats[color[j]]->rowptr[i]++;
}
/* Convert the per-row counts into CSR offsets (GKlib macro). */
for (i=0; i<ncolors; i++)
MAKECSR(j, nrows, smats[i]->rowptr);
for (i=0; i<ncolors; i++) {
smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
}
/* Pass 2: scatter each nonzero to its color's matrix, using rowptr[i]
   as a moving insertion cursor (advanced as entries are placed). */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
smats[color[j]]->rowptr[i]++;
}
}
/* Undo the cursor advancement to restore valid CSR pointers (GKlib macro). */
for (i=0; i<ncolors; i++)
SHIFTCSR(j, nrows, smats[i]->rowptr);
return smats;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
forward structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
\param numbering is either 1 or 0, indicating if the numbering of the
indices start from 1 or 0, respectively. If they start from 1,
they are automatically decreamented during input so that they
will start from 0. It only applies when GK_CSR_FMT_CSR is
used.
\returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
ssize_t i, k, l;
size_t nfields, nrows, ncols, nnz, fmt, ncon;
size_t lnlen;
ssize_t *rowptr;
int *rowind, ival;
float *rowval=NULL, fval;
int readsizes, readwgts;
char *line=NULL, *head, *tail, fmtstr[256];
FILE *fpin;
gk_csr_t *mat=NULL;
if (!gk_fexists(filename))
gk_errexit(SIGERR, "File %s does not exist!\n", filename);
/* Binary row-based format: header (nrows, ncols) followed by rowptr,
   rowind, and (optionally) rowval dumped verbatim. */
if (format == GK_CSR_FMT_BINROW) {
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
if (readvals == 1) {
mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
}
/* Binary column-based format: same layout as BINROW but for the
   column structure (colptr/colind/colval). */
if (format == GK_CSR_FMT_BINCOL) {
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
if (readvals) {
mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
}
/* Text formats: parse the header (if any) to learn the dimensions and
   which optional fields (sizes/weights/values) each row line carries,
   then fall through to the common line-by-line parser below. */
if (format == GK_CSR_FMT_CLUTO) {
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
gk_errexit(SIGERR, "Header line must contain 3 integers.\n");
readsizes = 0;
readwgts = 0;
readvals = 1;
numbering = 1;
}
else if (format == GK_CSR_FMT_METIS) {
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
fmt = ncon = 0;
nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
if (nfields < 2)
gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");
ncols = nrows;
/* METIS files store each undirected edge once; the CSR stores both
   directions. */
nnz *= 2;
if (fmt > 111)
gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);
/* The 3-digit fmt field encodes, per digit: sizes, weights, values. */
sprintf(fmtstr, "%03zu", fmt%1000);
readsizes = (fmtstr[0] == '1');
readwgts = (fmtstr[1] == '1');
readvals = (fmtstr[2] == '1');
numbering = 1;
ncon = (ncon == 0 ? 1 : ncon);
}
else {
/* Headerless CSR text: infer nrows/nnz from the file's statistics. */
readsizes = 0;
readwgts = 0;
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && nnz%2 == 1)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
if (readvals == 1)
nnz = nnz/2;
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
}
mat = gk_csr_Create();
mat->nrows = nrows;
rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
if (readvals != 2)
rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");
if (readsizes)
mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");
if (readwgts)
mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");
/*----------------------------------------------------------------------
* Read the sparse matrix file
*---------------------------------------------------------------------*/
/* Offset applied to column indices: -1 converts 1-based to 0-based. */
numbering = (numbering ? - 1 : 0);
for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
do {
if (gk_getline(&line, &lnlen, fpin) == -1)
gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
} while (line[0] == '%');
head = line;
tail = NULL;
/* Read vertex sizes */
if (readsizes) {
#ifdef __MSC__
mat->rsizes[i] = (float)strtod(head, &tail);
#else
mat->rsizes[i] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (mat->rsizes[i] < 0)
errexit("The size for vertex %zd must be >= 0\n", i+1);
head = tail;
}
/* Read vertex weights */
if (readwgts) {
for (l=0; l<ncon; l++) {
#ifdef __MSC__
mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
if (tail == head)
errexit("The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (mat->rwgts[i*ncon+l] < 0)
errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
head = tail;
}
}
/* Read the rest of the row */
while (1) {
ival = (int)strtol(head, &tail, 0);
if (tail == head)
break;
head = tail;
if ((rowind[k] = ival + numbering) < 0)
gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);
/* Track the largest column index seen to infer ncols. */
ncols = gk_max(rowind[k], ncols);
if (readvals == 1) {
#ifdef __MSC__
fval = (float)strtod(head, &tail);
#else
fval = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
head = tail;
rowval[k] = fval;
}
k++;
}
rowptr[i+1] = k;
}
/* METIS graphs are square by construction; otherwise infer ncols from
   the largest index encountered. */
if (format == GK_CSR_FMT_METIS) {
ASSERT(ncols+1 == mat->nrows);
mat->ncols = mat->nrows;
}
else {
mat->ncols = ncols+1;
}
if (k != nnz)
gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
"the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);
gk_fclose(fpin);
gk_free((void **)&line, LTERM);
return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
ssize_t i, j;
FILE *fpout;
/* Binary row-based dump: header then rowptr/rowind/(rowval) verbatim,
   mirroring the BINROW branch of gk_csr_Read. */
if (format == GK_CSR_FMT_BINROW) {
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
if (writevals)
fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);
gk_fclose(fpout);
return;
}
/* Binary column-based dump: same layout for the column structure. */
if (format == GK_CSR_FMT_BINCOL) {
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
if (writevals)
fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);
gk_fclose(fpout);
return;
}
/* Text output: to the named file, or stdout when filename is NULL. */
if (filename)
fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
else
fpout = stdout;
/* CLUTO format requires a header line and 1-based indices with values. */
if (format == GK_CSR_FMT_CLUTO) {
fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
writevals = 1;
numbering = 1;
}
/* One line per row: "index [value]" pairs for each nonzero. */
for (i=0; i<mat->nrows; i++) {
for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
if (writevals)
fprintf(fpout, " %f", mat->rowval[j]);
}
fprintf(fpout, "\n");
}
if (filename)
gk_fclose(fpout);
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The prunning takes place
by analyzing the row structure of the matrix. The prunning takes place
by removing rows/columns but it does not affect the numbering of the
remaining rows/columns.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param minf is the minimum number of rows (columns) that a column (row) must
be present in order to be kept,
\param maxf is the maximum number of rows (columns) that a column (row) must
be present at in order to be kept.
\returns the prunned matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
ssize_t i, j, nnz;
int nrows, ncols;
ssize_t *rowptr, *nrowptr;
int *rowind, *nrowind, *collen;
float *rowval, *nrowval;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
/* Output arrays are sized for the worst case (nothing pruned); the
   matrix dimensions themselves are preserved, only entries are dropped. */
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");
switch (what) {
case GK_CSR_COL:
/* Count how many rows each column appears in... */
collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
ASSERT(rowind[j] < ncols);
collen[rowind[j]]++;
}
}
/* ...turn the counts into keep/drop flags... */
for (i=0; i<ncols; i++)
collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);
/* ...and copy only the entries whose column is kept. */
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (collen[rowind[j]]) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
nnz++;
}
}
nrowptr[i+1] = nnz;
}
gk_free((void **)&collen, LTERM);
break;
case GK_CSR_ROW:
/* Keep a row's entries only when its length is within [minf, maxf];
   dropped rows become empty but keep their row number. */
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
}
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
ssize_t i, j, nnz;
int nrows, ncols, ncand, maxlen=0;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind;
float *rowval, *colval, *nrowval, rsum, tsum;
gk_csr_t *nmat;
gk_fkv_t *cand;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
/* Output arrays sized for the worst case (nothing filtered). */
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
switch (what) {
case GK_CSR_COL:
/* Filter per column (needs the column structure), but emit the kept
   entries into the row-based output, using nrowptr as per-row cursors. */
if (mat->colptr == NULL)
gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
gk_zcopy(nrows+1, rowptr, nrowptr);
for (i=0; i<ncols; i++)
maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);
/* NOTE(review): different columns can share rows, so the
   nrowptr[...]++ updates below may be performed concurrently by
   different threads on the same element -- looks like a potential
   data race; confirm against upstream GKlib. */
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<ncols; i++) {
/* Gather the column's entries and its total 1- or 2-norm mass. */
for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
cand[ncand].val = colind[j];
cand[ncand].key = colval[j];
tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
}
/* Keep the heaviest entries until the requested fraction is covered. */
gk_fkvsortd(ncand, cand);
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
}
gk_free((void **)&cand, LTERM);
}
/* compact the nrowind/nrowval */
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i] = nnz;
}
/* Undo the cursor advancement to restore valid CSR pointers. */
SHIFTCSR(i, nrows, nrowptr);
break;
case GK_CSR_ROW:
/* Filter per row: each row is independent, so the kept entries can be
   written directly at the row's original offset. */
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
for (i=0; i<nrows; i++)
maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<nrows; i++) {
for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
cand[ncand].val = rowind[j];
cand[ncand].key = rowval[j];
tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
}
gk_fkvsortd(ncand, cand);
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[rowptr[i]+j] = cand[j].val;
nrowval[rowptr[i]+j] = cand[j].key;
}
nrowptr[i+1] = rowptr[i]+j;
}
gk_free((void **)&cand, LTERM);
}
/* compact nrowind/nrowval */
nrowptr[0] = nnz = 0;
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
/* Keeps, along each row or column, the topk highest-weight entries plus any
   additional entries whose weight is at least keepval.  Returns a new matrix
   consisting only of its row-based structure; the input is not modified. */
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* the filtered matrix can never have more non-zeros than the input.
     BUGFIX: these allocation labels used to say "gk_csr_LowFilter" (copied
     from the sibling routine), mis-attributing any allocation failure. */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_TopKPlusFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_TopKPlusFilter: cand");

      /* nrowptr[r] serves as the running write head for row r */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++) {
        /* gather column i as (weight, row) pairs, sorted by decreasing weight */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* keep the topk heaviest entries unconditionally... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...plus any further entries whose weight reaches keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the in-place written rows into a contiguous CSR structure */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_TopKPlusFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather row i as (weight, column) pairs, sorted by decreasing weight */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* keep the topk heaviest entries unconditionally... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        /* ...plus any further entries whose weight reaches keepval */
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
over the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be pruned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be pruned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
/* Drops, along each row, the entries whose weight does not exceed zscore
   times the row's average weight (1/row-length for unit-length rows).
   Returns a new matrix with only its row-based structure populated; the
   input matrix is left untouched. */
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t ir, jj, nkept;
  int nrows;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  float *rowval, *nrowval, cutoff;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();
  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* the result can never hold more non-zeros than the input */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      nrowptr[0] = 0;
      nkept = 0;
      for (ir=0; ir<nrows; ir++) {
        cutoff = zscore/(rowptr[ir+1]-rowptr[ir]);
        for (jj=rowptr[ir]; jj<rowptr[ir+1]; jj++) {
          /* written as a negated '>' so NaN weights are treated exactly
             as in the reference implementation (i.e., dropped) */
          if (!(rowval[jj] > cutoff))
            continue;
          nrowind[nkept] = rowind[jj];
          nrowval[nkept] = rowval[jj];
          nkept++;
        }
        nrowptr[ir+1] = nkept;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
/* Removes empty columns from the row-based view of MAT, renumbering the
   surviving columns in decreasing order of frequency.  Operates in place;
   only mat->rowind and mat->ncols are affected. */
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t p;
  int c, nactive, nrows, ncols;
  ssize_t *rowptr;
  int *rowind, *old2new;
  gk_ikv_t *clens;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  old2new = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
  clens   = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");

  /* count the number of non-zeros in each column */
  for (c=0; c<ncols; c++) {
    clens[c].key = 0;
    clens[c].val = c;
  }
  for (p=0; p<rowptr[nrows]; p++)
    clens[rowind[p]].key++;

  /* order columns by decreasing frequency; the non-empty ones (which sort
     to the front) receive the new consecutive ids */
  gk_ikvsortd(ncols, clens);
  for (nactive=0, c=0; c<ncols; c++) {
    if (clens[c].key <= 0)
      break;
    old2new[clens[c].val] = nactive++;
  }

  /* renumber the column indices in place */
  for (p=0; p<rowptr[nrows]; p++)
    rowind[p] = old2new[rowind[p]];
  mat->ncols = nactive;

  gk_free((void **)&old2new, &clens, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
/* Sorts, in place, the indices of each row (or column) of MAT into
   increasing order; the associated values are permuted along with them. */
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
int n, nn=0;
ssize_t *ptr;
int *ind;
float *val;
/* bind to the requested row- or column-based view */
switch (what) {
case GK_CSR_ROW:
if (!mat->rowptr)
gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
n = mat->nrows;
ptr = mat->rowptr;
ind = mat->rowind;
val = mat->rowval;
break;
case GK_CSR_COL:
if (!mat->colptr)
gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
n = mat->ncols;
ptr = mat->colptr;
ind = mat->colind;
val = mat->colval;
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
#pragma omp parallel if (n > 100)
{
ssize_t i, j, k;
gk_ikv_t *cand;
float *tval;
/* nn = length of the longest row/column; computed by one thread only,
   and the implicit barrier of 'single' publishes it to the others */
#pragma omp single
for (i=0; i<n; i++)
nn = gk_max(nn, ptr[i+1]-ptr[i]);
/* per-thread scratch buffers sized for the longest row/column */
cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
#pragma omp for schedule(static)
for (i=0; i<n; i++) {
/* copy row/column i into scratch, flagging in k whether any
   out-of-order adjacent pair was seen */
for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
if (j > ptr[i] && ind[j] < ind[j-1])
k = 1; /* an inversion */
cand[j-ptr[i]].val = j-ptr[i];
cand[j-ptr[i]].key = ind[j];
tval[j-ptr[i]] = val[j];
}
/* sort and write back only the rows/columns that actually need it */
if (k) {
gk_ikvsorti(ptr[i+1]-ptr[i], cand);
for (j=ptr[i]; j<ptr[i+1]; j++) {
ind[j] = cand[j-ptr[i]].key;
val[j] = tval[cand[j-ptr[i]].val];
}
}
}
gk_free((void **)&cand, &tval, LTERM);
}
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
*/
/**************************************************************************/
/* Builds the column-based (row-based) structure of MAT from its row-based
   (column-based) structure, discarding any previously created index. */
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
/* 'f' stands for forward, 'r' stands for reverse */
ssize_t i, j, k, nf, nr;
ssize_t *fptr, *rptr;
int *find, *rind;
float *fval, *rval;
switch (what) {
case GK_CSR_COL:
/* forward = rows, reverse = columns */
nf = mat->nrows;
fptr = mat->rowptr;
find = mat->rowind;
fval = mat->rowval;
/* discard any stale column-based structure before rebuilding */
if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
if (mat->colind) gk_free((void **)&mat->colind, LTERM);
if (mat->colval) gk_free((void **)&mat->colval, LTERM);
nr = mat->ncols;
rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
case GK_CSR_ROW:
/* forward = columns, reverse = rows */
nf = mat->ncols;
fptr = mat->colptr;
find = mat->colind;
fval = mat->colval;
if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);
nr = mat->nrows;
rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
/* pass 1: histogram of reverse-structure lengths, converted to CSR offsets */
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rptr[find[j]]++;
}
MAKECSR(i, nr, rptr);
/* pass 2: scatter entries into the reverse structure; the rptr entries are
   used as advancing write heads and SHIFTCSR restores them afterwards */
if (rptr[nr] > 6*nr) {
/* matrices above this density threshold scatter indices and values in
   two separate sweeps -- presumably a locality optimization (TODO confirm) */
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
SHIFTCSR(i, nr, rptr);
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rval[rptr[find[j]]++] = fval[j];
}
SHIFTCSR(i, nr, rptr);
}
}
else {
/* sparser case: scatter indices and values in a single sweep */
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++) {
k = find[j];
rind[rptr[k]] = i;
rval[rptr[k]++] = fval[j];
}
}
}
else {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
}
SHIFTCSR(i, nr, rptr);
}
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
length.
\param mat the matrix itself,
\param what indicates what will be normalized and is obtained by
specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
/* Normalizes each of the n sparse vectors described by (ptr, val) to unit
   1-norm or 2-norm, in parallel.  Vectors whose accumulated norm is not
   positive are left untouched. */
static void gk_csr_normalize_vectors(int n, ssize_t *ptr, float *val, int norm)
{
  ssize_t i, j;
  float sum;

  #pragma omp parallel if (ptr[n] > OMPMINOPS)
  {
    #pragma omp for private(j,sum) schedule(static)
    for (i=0; i<n; i++) {
      /* accumulate the (squared) length of vector i */
      for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (norm == 2)
          sum += val[j]*val[j];
        else if (norm == 1)
          sum += val[j]; /* assume val[j] > 0 */
      }
      if (sum > 0) {
        if (norm == 2)
          sum = 1.0/sqrt(sum);
        else if (norm == 1)
          sum = 1.0/sum;
        for (j=ptr[i]; j<ptr[i+1]; j++)
          val[j] *= sum;
      }
    }
  }
}

/* Normalizes the rows and/or columns of MAT to unit length.
   what selects the view(s): GK_CSR_ROW, GK_CSR_COL, or their OR;
   norm selects the norm: 1 (1-norm) or 2 (2-norm).
   (The row and column branches were previously duplicated inline; they now
   share gk_csr_normalize_vectors, with identical behavior.) */
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  /* row and column normalization are independent; apply whichever views
     were requested and actually carry values */
  if (what&GK_CSR_ROW && mat->rowval)
    gk_csr_normalize_vectors(mat->nrows, mat->rowptr, mat->rowval, norm);

  if (what&GK_CSR_COL && mat->colval)
    gk_csr_normalize_vectors(mat->ncols, mat->colptr, mat->colval, norm);
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
/* Applies the selected term-frequency scaling scheme to the row-based
   values of MAT, in place. */
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          if (rowptr[i] == rowptr[i+1])
            continue; /* empty row: nothing to scale; also avoids reading
                         rowval[rowptr[i]] past the end of the array */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          if (rowptr[i] == rowptr[i+1])
            continue; /* empty row (see GK_CSR_MAXTF) */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        double logscale = 1.0/log(2.0);
        /* flat sweep over all non-zeros; row boundaries are irrelevant here */
#pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
        }
#ifdef XXX
        /* disabled row-wise variant, kept for reference */
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
              //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* document frequency of each column (serial histogram) */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

#pragma omp parallel if (ncols > OMPMINOPS)
      {
#pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      }

#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }
      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* document frequency of each column (serial histogram) */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
#pragma omp parallel if (ncols > OMPMINOPS)
      {
#pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        /* BUGFIX: bgfreq used to be computed (and the diagnostic printed) by
           every thread in the team, racing on the shared bgfreq and emitting
           one line per thread.  'single' runs it once; the implicit barrier
           at its end publishes bgfreq before the omp for below reads it. */
#pragma omp single
        {
          bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
          printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
        }

#pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

#pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
#pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }
      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
*/
/**************************************************************************/
/* Computes the sum of every row or column of MAT and caches the result in
   mat->rsums (mat->csums), replacing any previously computed sums. */
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t vec;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  /* bind to the requested view and (re)allocate the destination array */
  if (what == GK_CSR_ROW) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;
    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  /* one independent reduction per row/column */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (vec=0; vec<n; vec++)
    sums[vec] = gk_fsum(ptr[vec+1]-ptr[vec], val+ptr[vec], 1);
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
*/
/**************************************************************************/
/* Computes the squared 2-norm of every row or column of MAT and caches the
   result in mat->rnorms (mat->cnorms), replacing any previous values. */
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;
      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);
      /* BUGFIX: the allocation labels used to say "gk_csr_ComputeSums",
         mis-attributing any allocation failure to the wrong routine */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;

    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;
      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);
      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;

    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* norms[i] = dot product of vector i with itself */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
/* Computes the similarity between rows/columns i1 and i2 of MAT under the
   requested measure (GK_CSR_COS, GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN).
   The two index lists are merged in a single pass, which requires them to
   be sorted in increasing order (see gk_csr_SortIndices).
   BUGFIX: the merge loops previously used '&&' in their conditions, which
   made the i1==nind1/i2==nind2 branches unreachable and silently dropped
   the unmatched tail of the longer vector from the norm/sum accumulators;
   '||' restores the accumulation over all entries of both vectors that the
   branch structure was clearly written for. */
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* bind to the requested row- or column-based view */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      /* sim = dot product; stat1/stat2 = squared norms of the two vectors;
         i1/i2 are reused as merge cursors from here on */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          /* list 1 exhausted: consume the tail of list 2 */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          /* list 2 exhausted: consume the tail of list 1 */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          /* matching index: contributes to the dot product and both norms */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      /* weighted Jaccard: sum(min)/sum(max) = sim/(stat1+stat2-sim) */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      /* asymmetric min: sum(min) relative to the sum of the first vector */
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
similarity.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
\param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns the number of identified most similar rows, which can be
smaller than the requested number of nnbrs in those cases
in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
/* Retrieves the rows most similar to the sparse query vector (qind, qval)
   by accumulating partial similarities through the column-based
   (inverted-index) structure of the matrix. */
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
int *i_marker, gk_fkv_t *i_cand)
{
ssize_t i, ii, j, k;
int nrows, ncols, ncand;
ssize_t *colptr;
int *colind, *marker;
float *colval, *rnorms, mynorm, *rsums, mysum;
gk_fkv_t *cand;
/* an empty query matches nothing */
if (nqterms == 0)
return 0;
nrows = mat->nrows;
ncols = mat->ncols;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
/* marker[row] = position of that row in cand[], or -1 if not yet seen;
   caller-supplied workspaces are reused when provided */
marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
cand = (i_cand ? i_cand : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));
switch (simtype) {
case GK_CSR_COS:
/* accumulate dot products via the inverted index.
   NOTE(review): no division by row norms is performed here, so this
   presumably assumes unit-length rows and query -- confirm with callers */
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += colval[j]*qval[ii];
}
}
}
break;
case GK_CSR_JAC:
/* accumulate dot products, then convert to (extended) Jaccard below */
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += colval[j]*qval[ii];
}
}
}
/* NOTE(review): reads mat->rnorms, which looks like it requires a prior
   call to gk_csr_ComputeSquaredNorms(mat, GK_CSR_ROW) -- confirm */
rnorms = mat->rnorms;
mynorm = gk_fdot(nqterms, qval, 1, qval, 1);
for (i=0; i<ncand; i++)
cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
break;
case GK_CSR_MIN:
/* accumulate sum of element-wise minima over matching columns */
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += gk_min(colval[j], qval[ii]);
}
}
}
/* NOTE(review): reads mat->rsums, which looks like it requires a prior
   call to gk_csr_ComputeSums(mat, GK_CSR_ROW) -- confirm */
rsums = mat->rsums;
mysum = gk_fsum(nqterms, qval, 1);
for (i=0; i<ncand; i++)
cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
break;
/* Asymmetric MIN similarity */
case GK_CSR_AMIN:
for (ncand=0, ii=0; ii<nqterms; ii++) {
i = qind[ii];
if (i < ncols) {
for (j=colptr[i]; j<colptr[i+1]; j++) {
k = colind[j];
if (marker[k] == -1) {
cand[ncand].val = k;
cand[ncand].key = 0;
marker[k] = ncand++;
}
cand[marker[k]].key += gk_min(colval[j], qval[ii]);
}
}
}
/* normalize by the query's total weight only (hence "asymmetric") */
mysum = gk_fsum(nqterms, qval, 1);
for (i=0; i<ncand; i++)
cand[i].key = cand[i].key/mysum;
break;
default:
gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
return -1;
}
/* go and prune the hits that are below minsim (also resets marker[]) */
for (j=0, i=0; i<ncand; i++) {
marker[cand[i].val] = -1;
if (cand[i].key >= minsim)
cand[j++] = cand[i];
}
ncand = j;
/* select and sort the nsim best hits; nsim == -1 returns all, unsorted */
if (nsim == -1 || nsim >= ncand) {
nsim = ncand;
}
else {
nsim = gk_min(nsim, ncand);
gk_dfkvkselect(ncand, nsim, cand);
gk_fkvsortd(nsim, cand);
}
gk_fkvcopy(nsim, cand, hits);
/* free only the workspaces allocated locally */
if (i_marker == NULL)
gk_free((void **)&marker, LTERM);
if (i_cand == NULL)
gk_free((void **)&cand, LTERM);
return nsim;
}
|
GB_unop__identity_fc64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_int64)
// op(A') function: GB (_unop_tran__identity_fc64_int64)
// C type: GxB_FC64_t
// A type: int64_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator entry-wise, casting each
// int64 value of A to a GxB_FC64_t (real part = value, imaginary part = 0).
GrB_Info GB (_unop_apply__identity_fc64_int64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const int64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int64_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast int64 -> FC64, and apply identity.
GrB_Info GB (_unop_tran__identity_fc64_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the transpose kernel is shared: GB_unop_transpose.c expands here using
// the GB_* macros defined above for this type/operator combination
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
perturbations.c | /** @file perturbations.c Documented perturbation module
*
* Julien Lesgourgues, 23.09.2010
*
* Deals with the perturbation evolution.
* This module has two purposes:
*
* - at the beginning; to initialize the perturbations, i.e. to
* integrate the perturbation equations, and store temporarily the terms
* contributing to the source functions as a function of conformal
* time. Then, to perform a few manipulations of these terms in order to
* infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to
* store them as a function of conformal time inside an interpolation
* table.
*
* - at any time in the code; to evaluate the source functions at a
* given conformal time (by interpolating within the interpolation
* table).
*
* Hence the following functions can be called from other modules:
*
* -# perturb_init() at the beginning (but after background_init() and thermodynamics_init())
* -# perturb_sources_at_tau() at any later time
* -# perturb_free() at the end, when no more calls to perturb_sources_at_tau() are needed
*/
#include "perturbations.h"
/**
* Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau.
*
* Evaluate source functions at given conformal time tau by reading
* the pre-computed table and interpolating.
*
* @param ppt Input: pointer to perturbation structure containing interpolation tables
* @param index_md Input: index of requested mode
* @param index_ic Input: index of requested initial condition
* @param index_tp Input: index of requested source function type
* @param tau Input: any value of conformal time
* @param psource Output: vector (already allocated) of source function as a function of k
* @return the error status
*/
int perturb_sources_at_tau(
    struct perturbs * ppt,
    int index_md,
    int index_ic,
    int index_tp,
    double tau,
    double * psource
    ) {

  /** Summary: */

  /** - define local variables */
  int last = 0;                 /* spline search index (output of interpolator) */
  double tau_log = log(tau);    /* the late-time table is sampled in ln(tau) */

  /* flattened (ic, type) index into the per-mode source arrays */
  int tp_stride = index_ic * ppt->tp_size[index_md] + index_tp;

  /** - interpolate in the pre-computed table contained in ppt */

  /** - linear interpolation at early times (z>z_max_pk): available, but in
      practice never used by the default version of CLASS */
  if ((tau_log < ppt->ln_tau[0]) || (ppt->ln_tau_size <= 1)) {
    class_call(array_interpolate_two_bis(ppt->tau_sampling,
                                         1,
                                         0,
                                         ppt->sources[index_md][tp_stride],
                                         ppt->k_size[index_md],
                                         ppt->tau_size,
                                         tau,
                                         psource,
                                         ppt->k_size[index_md],
                                         ppt->error_message),
               ppt->error_message,
               ppt->error_message);
  }
  /** - more accurate spline interpolation at late times (z<z_max_pk), used
      when computing output quantities such as transfer functions T(k,z) or
      power spectra P(k,z) */
  else {
    class_call(array_interpolate_spline(ppt->ln_tau,
                                        ppt->ln_tau_size,
                                        ppt->late_sources[index_md][tp_stride],
                                        ppt->ddlate_sources[index_md][tp_stride],
                                        ppt->k_size[index_md],
                                        tau_log,
                                        &last,
                                        psource,
                                        ppt->k_size[index_md],
                                        ppt->error_message),
               ppt->error_message,
               ppt->error_message);
  }

  return _SUCCESS_;
}
/**
* Function called by the output module or the wrappers, which returns all
* the source functions \f$ S^{X} (k, \tau) \f$ at a given conformal
* time tau corresponding to the input redshift z.
*
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to perturbation structure
* @param output_format Input: choice of ordering and normalisation for the output quantities
* @param z Input: redshift
* @param number_of_titles Input: number of requested source functions (found in perturb_output_titles)
* @param data Output: vector of all source functions for all k values and initial conditions (previously allocated with the right size)
* @return the error status
*/
int perturb_output_data(
struct background * pba,
struct perturbs * ppt,
enum file_format output_format,
double z,
int number_of_titles,
double *data
) {
int n_ncdm;
double k, k_over_h, k2;
double * tkfull=NULL; /* array with argument
pk_ic[(index_k * psp->ic_size[index_md] + index_ic)*psp->tr_size+index_tr] */
double *tk;
double *dataptr;
double * pvecsources;
double tau;
/* this routine only outputs scalar-mode transfer functions */
int index_md = ppt->index_md_scalars;
int index_ic;
int index_k;
int index_tp;
/* column cursor within one output row; NOTE(review): class_store_double is
   assumed to advance storeidx as a side effect when its condition flag is
   true — confirm against the macro definition */
int storeidx;
/* allocate the full (k, ic, type) table only if it is non-empty; tkfull
   stays NULL otherwise, which the final free() guards against */
if (ppt->k_size[index_md]*ppt->ic_size[index_md]*ppt->tp_size[index_md] > 0) {
class_alloc(tkfull,
ppt->k_size[index_md]*ppt->ic_size[index_md]*ppt->tp_size[index_md]*sizeof(double),
ppt->error_message);
}
/** - compute \f$T_i(k)\f$ for each k (if several ic's, compute it for each ic; if z_pk = 0, this is done by directly reading inside the pre-computed table; if not, this is done by interpolating the table at the correct value of tau. */
/* if z_pk = 0, no interpolation needed */
if (z == 0.) {
for (index_k=0; index_k<ppt->k_size[index_md]; index_k++) {
for (index_tp=0; index_tp<ppt->tp_size[index_md]; index_tp++) {
for (index_ic=0; index_ic<ppt->ic_size[index_md]; index_ic++) {
/* last time slice of the source table is today (tau_size-1) */
tkfull[(index_k * ppt->ic_size[index_md] + index_ic) * ppt->tp_size[index_md] + index_tp]
= ppt->sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp][(ppt->tau_size-1) * ppt->k_size[index_md] + index_k];
}
}
}
}
/* if 0 <= z_pk <= z_max_pk, interpolation needed, */
else {
/* check the time corresponding to the highest redshift requested in output plus one */
class_call(background_tau_of_z(pba,
z,
&tau),
pba->error_message,
ppt->error_message);
class_test(log(tau) < ppt->ln_tau[0],
"Asking sources at a z bigger than z_max_pk, something probably went wrong\n",
ppt->error_message);
/* scratch vector holding one source function sampled over all k */
class_alloc(pvecsources,
ppt->k_size[index_md]*sizeof(double),
ppt->error_message);
for (index_k=0; index_k<ppt->k_size[index_md]; index_k++) {
for (index_tp=0; index_tp<ppt->tp_size[index_md]; index_tp++) {
for (index_ic=0; index_ic<ppt->ic_size[index_md]; index_ic++) {
class_call(perturb_sources_at_tau(ppt,
index_md,
index_ic,
index_tp,
tau,
pvecsources),
ppt->error_message,
ppt->error_message);
tkfull[(index_k * ppt->ic_size[index_md] + index_ic) * ppt->tp_size[index_md] + index_tp] =
pvecsources[index_k];
}
}
}
free(pvecsources);
}
/** - store data */
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_k=0; index_k<ppt->k_size[index_md]; index_k++) {
storeidx = 0;
/* dataptr: start of the output row for this (ic, k) pair; tk: all source
   types for this (k, ic) in the flattened table */
dataptr = data+index_ic*(ppt->k_size[index_md]*number_of_titles)+index_k*number_of_titles;
tk = &(tkfull[(index_k * ppt->ic_size[index_md] + index_ic) * ppt->tp_size[index_md]]);
k = ppt->k[index_md][index_k];
k2 = k*k;
k_over_h = k/pba->h;
class_store_double(dataptr, k_over_h, _TRUE_,storeidx);
/* indices for species associated with a velocity transfer function in Fourier space */
if (output_format == class_format) {
if (ppt->has_density_transfers == _TRUE_) {
class_store_double(dataptr,tk[ppt->index_tp_delta_g],ppt->has_source_delta_g,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_b],ppt->has_source_delta_b,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_cdm],ppt->has_source_delta_cdm,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_gdm],ppt->has_source_delta_gdm,storeidx); // GDM_CLASS
class_store_double(dataptr,tk[ppt->index_tp_delta_idm_dr],ppt->has_source_delta_idm_dr,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_fld],ppt->has_source_delta_fld,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_ur],ppt->has_source_delta_ur,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_idr],ppt->has_source_delta_idr,storeidx);
if (pba->has_ncdm == _TRUE_){
for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr,tk[ppt->index_tp_delta_ncdm1+n_ncdm],ppt->has_source_delta_ncdm,storeidx);
}
}
class_store_double(dataptr,tk[ppt->index_tp_delta_dcdm],ppt->has_source_delta_dcdm,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_dr],ppt->has_source_delta_dr,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_scf],ppt->has_source_delta_scf,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_delta_tot],ppt->has_source_delta_tot,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_phi],ppt->has_source_phi,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_psi],ppt->has_source_psi,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_phi_prime],ppt->has_source_phi_prime,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_h],ppt->has_source_h,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_h_prime],ppt->has_source_h_prime,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_eta],ppt->has_source_eta,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_eta_prime],ppt->has_source_eta_prime,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_H_T_Nb_prime],ppt->has_source_H_T_Nb_prime,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_k2gamma_Nb],ppt->has_source_k2gamma_Nb,storeidx);
}
if (ppt->has_velocity_transfers == _TRUE_) {
class_store_double(dataptr,tk[ppt->index_tp_theta_g],ppt->has_source_theta_g,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_b],ppt->has_source_theta_b,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_cdm],ppt->has_source_theta_cdm,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_gdm],ppt->has_source_theta_gdm,storeidx); // GDM_CLASS
class_store_double(dataptr,tk[ppt->index_tp_theta_idm_dr],ppt->has_source_theta_idm_dr,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_fld],ppt->has_source_theta_fld,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_ur],ppt->has_source_theta_ur,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_idr],ppt->has_source_theta_idr,storeidx);
if (pba->has_ncdm == _TRUE_){
for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
class_store_double(dataptr,tk[ppt->index_tp_theta_ncdm1+n_ncdm],ppt->has_source_theta_ncdm,storeidx);
}
}
class_store_double(dataptr,tk[ppt->index_tp_theta_dcdm],ppt->has_source_theta_dcdm,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_dr],ppt->has_source_theta_dr,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_scf],ppt->has_source_theta_scf,storeidx);
class_store_double(dataptr,tk[ppt->index_tp_theta_tot],ppt->has_source_theta_tot,storeidx);
}
}
else if (output_format == camb_format) {
/* rescale and reorder the matter transfer functions following the CMBFAST/CAMB convention */
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_cdm]/k2,ppt->has_source_delta_cdm,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_gdm]/k2,ppt->has_source_delta_gdm,storeidx,0.0); // GDM_CLASS
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_idm_dr]/k2,ppt->has_source_delta_idm_dr,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_b]/k2,ppt->has_source_delta_b,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_g]/k2,ppt->has_source_delta_g,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_ur]/k2,ppt->has_source_delta_ur,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_idr]/k2,ppt->has_source_delta_idr,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_ncdm1]/k2,ppt->has_source_delta_ncdm,storeidx,0.0);
class_store_double_or_default(dataptr,-tk[ppt->index_tp_delta_tot]/k2,_TRUE_,storeidx,0.0);
}
}
}
//Necessary because the size could be zero (if ppt->tp_size is zero)
if (tkfull != NULL)
free(tkfull);
return _SUCCESS_;
}
/**
* Fill array of strings with the name of the requested 'mTk, vTk' functions
* (transfer functions as a function of wavenumber for fixed times).
*
* @param pba Input: pointer to the background structure
* @param ppt Input: pointer to the perturbation structure
* @param output_format Input: flag for the format
* @param titles Output: name strings
* @return the error status
*/
int perturb_output_titles(
    struct background *pba,
    struct perturbs *ppt,
    enum file_format output_format,
    char titles[_MAXTITLESTRINGLENGTH_]
    ){

  /* running index over non-cold dark matter species */
  int n_ncdm;
  /* scratch buffer for per-species column names */
  char tmp[40];

  if (output_format == class_format) {
    class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);
    if (ppt->has_density_transfers == _TRUE_) {
      /* density transfer functions, in the same order as the values stored
         by perturb_output_data() */
      class_store_columntitle(titles,"d_g",_TRUE_);
      class_store_columntitle(titles,"d_b",_TRUE_);
      class_store_columntitle(titles,"d_cdm",pba->has_cdm);
      class_store_columntitle(titles,"d_gdm",pba->has_gdm); // GDM_CLASS
      class_store_columntitle(titles,"d_idm_dr",pba->has_idm_dr);
      class_store_columntitle(titles,"d_fld",pba->has_fld);
      class_store_columntitle(titles,"d_ur",pba->has_ur);
      class_store_columntitle(titles,"d_idr",pba->has_idr);
      if (pba->has_ncdm == _TRUE_) {
        for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
          sprintf(tmp,"d_ncdm[%d]",n_ncdm);
          class_store_columntitle(titles,tmp,_TRUE_);
        }
      }
      class_store_columntitle(titles,"d_dcdm",pba->has_dcdm);
      class_store_columntitle(titles,"d_dr",pba->has_dr);
      class_store_columntitle(titles,"d_scf",pba->has_scf);
      class_store_columntitle(titles,"d_tot",_TRUE_);
      class_store_columntitle(titles,"phi",ppt->has_source_phi);
      class_store_columntitle(titles,"psi",ppt->has_source_psi);
      class_store_columntitle(titles,"phi_prime",ppt->has_source_phi_prime);
      class_store_columntitle(titles,"h",ppt->has_source_h);
      class_store_columntitle(titles,"h_prime",ppt->has_source_h_prime);
      class_store_columntitle(titles,"eta",ppt->has_source_eta);
      class_store_columntitle(titles,"eta_prime",ppt->has_source_eta_prime);
      class_store_columntitle(titles,"H_T_Nb_prime",ppt->has_source_H_T_Nb_prime);
      /* bug fix: a duplicate "H_T_Nb_prime" title was previously also stored
         under the has_source_k2gamma_Nb flag, producing one more column title
         than data columns written by perturb_output_data() whenever that flag
         is on */
      class_store_columntitle(titles,"k2gamma_Nb",ppt->has_source_k2gamma_Nb);
    }
    if (ppt->has_velocity_transfers == _TRUE_) {
      /* velocity transfer functions; t_cdm is absent in synchronous gauge,
         where the CDM velocity vanishes by construction */
      class_store_columntitle(titles,"t_g",_TRUE_);
      class_store_columntitle(titles,"t_b",_TRUE_);
      class_store_columntitle(titles,"t_cdm",((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous)));
      class_store_columntitle(titles,"t_gdm",pba->has_gdm); // GDM_CLASS
      class_store_columntitle(titles,"t_idm_dr",pba->has_idm_dr);
      class_store_columntitle(titles,"t_fld",pba->has_fld);
      class_store_columntitle(titles,"t_ur",pba->has_ur);
      class_store_columntitle(titles,"t_idr",pba->has_idr);
      if (pba->has_ncdm == _TRUE_) {
        for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
          sprintf(tmp,"t_ncdm[%d]",n_ncdm);
          class_store_columntitle(titles,tmp,_TRUE_);
        }
      }
      class_store_columntitle(titles,"t_dcdm",pba->has_dcdm);
      class_store_columntitle(titles,"t_dr",pba->has_dr);
      /* typo fix: was "t__scf" (double underscore), inconsistent with "d_scf" */
      class_store_columntitle(titles,"t_scf",pba->has_scf);
      class_store_columntitle(titles,"t_tot",_TRUE_);
    }
  }
  else if (output_format == camb_format) {
    class_store_columntitle(titles,"k (h/Mpc)",_TRUE_);
    class_store_columntitle(titles,"-T_cdm/k2",_TRUE_);
    class_store_columntitle(titles,"-T_gdm/k2",_TRUE_); // GDM_CLASS
    class_store_columntitle(titles,"-T_idm_dr/k2",_TRUE_);
    class_store_columntitle(titles,"-T_b/k2",_TRUE_);
    class_store_columntitle(titles,"-T_g/k2",_TRUE_);
    class_store_columntitle(titles,"-T_ur/k2",_TRUE_);
    class_store_columntitle(titles,"-T_idr/k2",_TRUE_);
    class_store_columntitle(titles,"-T_ncdm/k2",_TRUE_);
    class_store_columntitle(titles,"-T_tot/k2",_TRUE_);
  }

  return _SUCCESS_;
}
/**
* Fill strings that will be used when writing the transfer functions
* and the spectra in files (in the file names and in the comment at the beginning of each file).
*
* @param ppt Input: pointer to the perturbation structure
* @param index_ic Input: index of the initial condition
* @param first_line Output: line of comment
* @param ic_suffix Output: suffix for the output file name
* @return the error status
*
*/
int perturb_output_firstline_and_ic_suffix(
    struct perturbs *ppt,
    int index_ic,
    char first_line[_LINE_LENGTH_MAX_],
    FileName ic_suffix
    ){

  /* table of candidate initial-condition modes; entries are scanned in
     order and the last match wins, exactly as in the original cascade of
     independent if-statements */
  const struct {
    int has;            /* is this ic mode switched on? */
    int index;          /* its index among the active ic's */
    const char *suffix; /* short tag used in output file names */
    const char *line;   /* human-readable comment for the file header */
  } modes[] = {
    { ppt->has_ad,  ppt->index_ic_ad,  "ad",
      "for adiabatic (AD) mode (normalized to initial curvature=1) " },
    { ppt->has_bi,  ppt->index_ic_bi,  "bi",
      "for baryon isocurvature (BI) mode (normalized to initial entropy=1)" },
    { ppt->has_cdi, ppt->index_ic_cdi, "cdi",
      "for CDM isocurvature (CDI) mode (normalized to initial entropy=1)" },
    { ppt->has_nid, ppt->index_ic_nid, "nid",
      "for neutrino density isocurvature (NID) mode (normalized to initial entropy=1)" },
    { ppt->has_niv, ppt->index_ic_niv, "niv",
      "for neutrino velocity isocurvature (NIV) mode (normalized to initial entropy=1)" }
  };
  size_t imode;

  /* default to empty strings when index_ic matches no active mode */
  first_line[0] = '\0';
  ic_suffix[0] = '\0';

  for (imode = 0; imode < sizeof(modes)/sizeof(modes[0]); imode++) {
    if ((modes[imode].has == _TRUE_) && (index_ic == modes[imode].index)) {
      strcpy(ic_suffix, modes[imode].suffix);
      strcpy(first_line, modes[imode].line);
    }
  }

  return _SUCCESS_;
}
/**
* Initialize the perturbs structure, and in particular the table of source functions.
*
* Main steps:
*
* - given the values of the flags describing which kind of
* perturbations should be considered (modes: scalar/vector/tensor,
* initial conditions, type of source functions needed...),
* initialize indices and wavenumber list
*
* - define the time sampling for the output source functions
*
* - for each mode (scalar/vector/tensor): initialize the indices of
* relevant perturbations, integrate the differential system,
* compute and store the source functions.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Output: Initialized perturbation structure
* @return the error status
*/
int perturb_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
/* running index for modes */
int index_md;
/* running index for initial conditions */
int index_ic;
/* running index for wavenumbers */
int index_k;
/* running index for type of perturbation */
int index_tp;
/* pointer to one struct perturb_workspace per thread (one if no openmp) */
struct perturb_workspace ** pppw;
/* background quantities */
double w_fld_ini, w_fld_0,dw_over_da_fld,integral_fld;
/* number of threads (always one if no openmp) */
int number_of_threads=1;
/* index of the thread (always 0 if no openmp) */
int thread=0;
/* This code can be optionally compiled with the openmp option for parallel computation.
Inside parallel regions, the use of the command "return" is forbidden.
For error management, instead of "return _FAILURE_", we will set the variable below
to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
parallel region. */
int abort;
/* unsigned integer that will be set to the size of the workspace */
size_t sz;
#ifdef _OPENMP
/* instrumentation times */
double tstart, tstop, tspent;
#endif
/** - perform preliminary checks */
if (ppt->has_perturbations == _FALSE_) {
if (ppt->perturbations_verbose > 0)
printf("No sources requested. Perturbation module skipped.\n");
return _SUCCESS_;
}
else {
if (ppt->perturbations_verbose > 0)
printf("Computing sources\n");
}
/* GDM_CLASS: can be removed because not exactly true; cf. page 10 of the theory paper
1605.00649 : "We adopt the synchronous gauge by setting Psi=Xi=0. This
gauge has a residual gauge mode which is set to zero by discarding
decaying initial conditions." */
class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_),
ppt->error_message,
"In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge");
/* sanity-check that each approximation-scheme flag lies within its enum range */
class_test ((ppr->tight_coupling_approximation < first_order_MB) ||
(ppr->tight_coupling_approximation > compromise_CLASS),
ppt->error_message,
"your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation);
class_test ((ppr->radiation_streaming_approximation < rsa_null) ||
(ppr->radiation_streaming_approximation > rsa_none),
ppt->error_message,
"your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation);
if (pba->has_idr == _TRUE_){
class_test ((ppr->idr_streaming_approximation < rsa_idr_none) ||
(ppr->idr_streaming_approximation > rsa_idr_MD),
ppt->error_message,
"your idr_radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->idr_streaming_approximation);
}
if (pba->has_ur == _TRUE_) {
class_test ((ppr->ur_fluid_approximation < ufa_mb) ||
(ppr->ur_fluid_approximation > ufa_none),
ppt->error_message,
"your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation);
}
if (pba->has_ncdm == _TRUE_) {
class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) ||
(ppr->ncdm_fluid_approximation > ncdmfa_none),
ppt->error_message,
"your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation);
if (ppt->has_nc_density == _TRUE_) {
if (ppt->perturbations_verbose > 0) {
fprintf(stdout," -> [WARNING:] You request the number count Cl's in presence of non-cold dark matter.\n Like in all previous CLASS and CLASSgal versions, this will be inferred from the total matter density,\n but it could make much more sense physically to compute it from the CDM+baryon density only.\n To get the latter behavior you would just need to change one line in transfer.c:\n search there for a comment starting with 'use here delta_cb'\n");
}
}
}
if (pba->has_fld == _TRUE_) {
/* check values of w_fld at initial time and today */
class_call(background_w_fld(pba, 0., &w_fld_ini,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
class_call(background_w_fld(pba,pba->a_today,&w_fld_0,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
class_test(w_fld_ini >= 0.,
ppt->error_message,
"The fluid is meant to be negligible at early time, and unimportant for defining the initial conditions of other species. You are using parameters for which this assumption may break down, since at early times you have w_fld(a--->0) = %e >= 0",w_fld_ini);
if (pba->use_ppf == _FALSE_) {
/* a sign change of (1+w) between early times and today implies a phantom crossing */
class_test((w_fld_ini +1.0)*(w_fld_0+1.0) <= 0.0,
ppt->error_message,
"w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid perturbations. Try to switch to PPF scheme: use_ppf = yes");
/* the next check is meaningful at least for w(a) = w0 + wa*(1-a/a0); for general formulas and with use_ppf=no, you may prefer to comment it out... */
class_test((w_fld_0 == -1.) && (dw_over_da_fld == 0.),
ppt->error_message,
"Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant or with PPF scheme: use_ppf = yes");
}
}
if (pba->has_dcdm == _TRUE_) {
class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_),
ppt->error_message,
"Non-adiabatic initial conditions not coded in presence of decaying dark matter");
}
class_test(ppt->has_vectors == _TRUE_,
ppt->error_message,
"Vectors not coded yet");
if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) {
printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n");
}
/* decide which extra species must be evolved explicitly for tensor modes,
   depending on the chosen tensor method */
if (ppt->has_tensors == _TRUE_) {
ppt->evolve_tensor_ur = _FALSE_;
ppt->evolve_tensor_ncdm = _FALSE_;
switch (ppt->tensor_method) {
case (tm_photons_only):
break;
case (tm_massless_approximation):
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_))
ppt->evolve_tensor_ur = _TRUE_;
break;
case (tm_exact):
if (pba->has_ur == _TRUE_)
ppt->evolve_tensor_ur = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->evolve_tensor_ncdm = _TRUE_;
break;
}
}
class_test((pba->h > _h_BIG_) || (pba->h < _h_SMALL_),
ppt->error_message,
"Your value of pba->h=%e is out of the bounds [%e , %e] and could cause a crash of the perturbation ODE integration. If you want to force this barrier, you may comment it out in perturbation.c",
pba->h,
_h_SMALL_,
_h_BIG_);
class_test((pba->Omega0_b*pba->h*pba->h < _omegab_SMALL_) || (pba->Omega0_b*pba->h*pba->h > _omegab_BIG_),
ppt->error_message,
"Your value of omega_b=%e is out of the bounds [%e , %e] and could cause a crash of the perturbation ODE integration. If you want to force this barrier, you may comment it out in perturbation.c",
pba->Omega0_b*pba->h*pba->h,
_omegab_SMALL_,
_omegab_BIG_);
/** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */
class_call(perturb_indices_of_perturbs(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
if (ppt->z_max_pk > pth->z_rec) {
class_test(ppt->has_cmb == _TRUE_,
ppt->error_message,
"You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you don't ask for a calculation of the CMB source function(s). Remove any CMB from your output and try e.g. with 'output=mTk' or 'output=mTk,vTk'",
ppt->z_max_pk,
pth->z_rec);
class_test(ppt->has_source_delta_m == _TRUE_,
ppt->error_message,
"You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you ask only transfer functions, e.g. with 'output=mTk' or 'output=mTk,vTk'. But if you need the total matter (e.g. with 'mPk', 'dCl', etc.) there is an issue with the calculation of delta_m at very early times. By default, delta_m is a gauge-invariant variable (the density fluctuation in comoving gauge) and this quantity is hard to get accurately at very early times. The solution is to define delta_m as the density fluctuation in the current gauge, synchronous or newtonian. For the moment this must be done manually by commenting the line 'ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;' in perturb_sources(). In the future there will be an option for doing it in an easier way.",
ppt->z_max_pk,
pth->z_rec);
}
/** - define the common time sampling for all sources using
perturb_timesampling_for_sources() */
class_call(perturb_timesampling_for_sources(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
/** - if we want to store perturbations for given k values, write titles and allocate storage */
class_call(perturb_prepare_k_output(pba,ppt),
ppt->error_message,
ppt->error_message);
/** - create an array of workspaces in multi-thread case */
#ifdef _OPENMP
#pragma omp parallel
{
number_of_threads = omp_get_num_threads();
}
#endif
class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message);
/** - loop over modes (scalar, tensors, etc). For each mode: */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
if (ppt->perturbations_verbose > 1)
printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size);
abort = _FALSE_;
/* size of one workspace, allocated per thread just below */
sz = sizeof(struct perturb_workspace);
#pragma omp parallel \
shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads) \
private(thread) \
num_threads(number_of_threads)
{
#ifdef _OPENMP
thread=omp_get_thread_num();
#endif
/** - --> (a) create a workspace (one per thread in multi-thread case) */
class_alloc_parallel(pppw[thread],sz,ppt->error_message);
/** - --> (b) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */
class_call_parallel(perturb_workspace_init(ppr,
pba,
pth,
ppt,
index_md,
pppw[thread]),
ppt->error_message,
ppt->error_message);
} /* end of parallel region */
/* a thread failed inside the parallel region above */
if (abort == _TRUE_) return _FAILURE_;
/** - --> (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
if (ppt->perturbations_verbose > 1) {
printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]);
printf("evolving %d wavenumbers\n",ppt->k_size[index_md]);
}
abort = _FALSE_;
#pragma omp parallel \
shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \
private(index_k,thread,tstart,tstop,tspent) \
num_threads(number_of_threads)
{
#ifdef _OPENMP
thread=omp_get_thread_num();
tspent=0.;
#endif
#pragma omp for schedule (dynamic)
/* integrating backwards is slightly more optimal for parallel runs */
//for (index_k = 0; index_k < ppt->k_size; index_k++) {
for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) {
if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) {
printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]);
if (pba->sgnK != 0)
printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K));
printf("\n");
}
#ifdef _OPENMP
tstart = omp_get_wtime();
#endif
class_call_parallel(perturb_solve(ppr,
pba,
pth,
ppt,
index_md,
index_ic,
index_k,
pppw[thread]),
ppt->error_message,
ppt->error_message);
#ifdef _OPENMP
tstop = omp_get_wtime();
tspent += tstop-tstart;
#endif
#pragma omp flush(abort)
} /* end of loop over wavenumbers */
#ifdef _OPENMP
if (ppt->perturbations_verbose>2)
printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n",
__func__,tspent,omp_get_thread_num());
#endif
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
} /* end of loop over initial conditions */
abort = _FALSE_;
/* free the per-thread workspaces for this mode */
#pragma omp parallel \
shared(pppw,ppt,index_md,abort,number_of_threads) \
private(thread) \
num_threads(number_of_threads)
{
#ifdef _OPENMP
thread=omp_get_thread_num();
#endif
class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]),
ppt->error_message,
ppt->error_message);
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
} /* end loop over modes */
free(pppw);
/** - spline the source array with respect to the time variable */
/* second derivatives (ddlate_sources) enable the spline interpolation
   performed later by perturb_sources_at_tau() */
if (ppt->ln_tau_size > 1) {
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
abort = _FALSE_;
#pragma omp parallel \
shared(ppt,index_md,index_ic,abort,number_of_threads) \
private(index_tp) \
num_threads(number_of_threads)
{
#pragma omp for schedule (dynamic)
for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) {
class_call_parallel(array_spline_table_lines(ppt->ln_tau,
ppt->ln_tau_size,
ppt->late_sources[index_md][index_ic * ppt->tp_size[index_md] + index_tp],
ppt->k_size[index_md],
ppt->ddlate_sources[index_md][index_ic*ppt->tp_size[index_md] + index_tp],
_SPLINE_EST_DERIV_,
ppt->error_message),
ppt->error_message,
ppt->error_message);
}
} /* end of parallel region */
if (abort == _TRUE_) return _FAILURE_;
} /* end of loop over initial condition */
} /* end of loop over mode */
}
return _SUCCESS_;
}
/**
* Free all memory space allocated by perturb_init().
*
* To be called at the end of each run, only when no further calls to
* perturb_sources_at_tau() are needed.
*
* @param ppt Input: perturbation structure to be freed
* @return the error status
*/
int perturb_free(
    struct perturbs * ppt
    ) {

  int md, ic, tp, ifile;

  /* nothing was allocated if the perturbation module was skipped */
  if (ppt->has_perturbations != _TRUE_)
    return _SUCCESS_;

  /* release the per-(mode, ic, type) source tables, then the per-mode arrays */
  for (md = 0; md < ppt->md_size; md++) {
    for (ic = 0; ic < ppt->ic_size[md]; ic++) {
      for (tp = 0; tp < ppt->tp_size[md]; tp++) {
        free(ppt->sources[md][ic*ppt->tp_size[md]+tp]);
        /* spline second derivatives exist only when a late-time table was built */
        if (ppt->ln_tau_size > 1)
          free(ppt->ddlate_sources[md][ic*ppt->tp_size[md]+tp]);
      }
    }
    free(ppt->sources[md]);
    free(ppt->late_sources[md]);
    free(ppt->ddlate_sources[md]);
    free(ppt->k[md]);
  }

  /* time samplings */
  free(ppt->tau_sampling);
  if (ppt->ln_tau_size > 1)
    free(ppt->ln_tau);

  /* top-level bookkeeping arrays */
  free(ppt->tp_size);
  free(ppt->ic_size);
  free(ppt->k);
  free(ppt->k_size_cmb);
  free(ppt->k_size_cl);
  free(ppt->k_size);
  free(ppt->sources);
  free(ppt->late_sources);
  free(ppt->ddlate_sources);

  if (ppt->alpha_idm_dr != NULL)
    free(ppt->alpha_idm_dr);
  if (ppt->beta_idr != NULL)
    free(ppt->beta_idr);

  /** Stuff related to perturbations output: */
  /** - Free non-NULL pointers */
  if (ppt->index_k_output_values != NULL)
    free(ppt->index_k_output_values);

  for (ifile = 0; ifile < _MAX_NUMBER_OF_K_FILES_; ifile++) {
    if (ppt->scalar_perturbations_data[ifile] != NULL)
      free(ppt->scalar_perturbations_data[ifile]);
    if (ppt->vector_perturbations_data[ifile] != NULL)
      free(ppt->vector_perturbations_data[ifile]);
    if (ppt->tensor_perturbations_data[ifile] != NULL)
      free(ppt->tensor_perturbations_data[ifile]);
  }

  return _SUCCESS_;
}
/**
* Initialize all indices and allocate most arrays in perturbs structure.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_indices_of_perturbs(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
/* index_type: running counter over source types within one mode */
int index_type;
/* index_md: running counter over modes (scalars, vectors, tensors) */
int index_md;
/* index_ic: running counter over initial conditions within one mode */
int index_ic;
/* index_type_common: number of source types shared by all modes (t2, p) */
int index_type_common;
/* filenum: loop index over the fixed-size per-k output file slots */
int filenum;
/** - count modes (scalar, vector, tensor) and assign corresponding indices */
index_md = 0;
class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1);
class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1);
class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1);
ppt->md_size = index_md;
class_test(index_md == 0,
ppt->error_message,
"you should have at least one out of {scalars, vectors, tensors} !!!");
/** - allocate array of number of types for each mode, ppt->tp_size[index_md] */
class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message);
/** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */
class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message);
/** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */
/* late_sources / ddlate_sources are sized like sources; per-mode inner
arrays are allocated at the end of this function. (ddlate_ presumably
holds second derivatives for spline interpolation of the late-time
sources -- TODO confirm against the interpolation routines.) */
class_alloc(ppt->sources, ppt->md_size * sizeof(double *),ppt->error_message);
class_alloc(ppt->late_sources, ppt->md_size * sizeof(double *),ppt->error_message);
class_alloc(ppt->ddlate_sources,ppt->md_size * sizeof(double *),ppt->error_message);
/** - initialize variables for the output of k values */
ppt->index_k_output_values=NULL;
ppt->number_of_scalar_titles=0;
ppt->number_of_vector_titles=0;
ppt->number_of_tensor_titles=0;
/* NULL out all per-file data slots so that the freeing code can safely
test each pointer before calling free() */
for (filenum = 0; filenum<_MAX_NUMBER_OF_K_FILES_; filenum++){
ppt->scalar_perturbations_data[filenum] = NULL;
ppt->vector_perturbations_data[filenum] = NULL;
ppt->tensor_perturbations_data[filenum] = NULL;
}
/** - initialization of all flags to false (will eventually be set to true later) */
ppt->has_cmb = _FALSE_;
ppt->has_lss = _FALSE_;
ppt->has_source_t = _FALSE_;
ppt->has_source_p = _FALSE_;
ppt->has_source_delta_m = _FALSE_;
ppt->has_source_delta_cb = _FALSE_;
ppt->has_source_delta_tot = _FALSE_;
ppt->has_source_delta_g = _FALSE_;
ppt->has_source_delta_b = _FALSE_;
ppt->has_source_delta_cdm = _FALSE_;
ppt->has_source_delta_gdm = _FALSE_; // GDM_CLASS
ppt->has_source_delta_dcdm = _FALSE_;
ppt->has_source_delta_fld = _FALSE_;
ppt->has_source_delta_scf = _FALSE_;
ppt->has_source_delta_dr = _FALSE_;
ppt->has_source_delta_ur = _FALSE_;
ppt->has_source_delta_idr = _FALSE_;
ppt->has_source_delta_idm_dr = _FALSE_;
ppt->has_source_delta_ncdm = _FALSE_;
ppt->has_source_theta_m = _FALSE_;
ppt->has_source_theta_cb = _FALSE_;
ppt->has_source_theta_tot = _FALSE_;
ppt->has_source_theta_g = _FALSE_;
ppt->has_source_theta_b = _FALSE_;
ppt->has_source_theta_cdm = _FALSE_;
ppt->has_source_theta_gdm = _FALSE_; // GDM_CLASS
ppt->has_source_theta_dcdm = _FALSE_;
ppt->has_source_theta_fld = _FALSE_;
ppt->has_source_theta_scf = _FALSE_;
ppt->has_source_theta_dr = _FALSE_;
ppt->has_source_theta_ur = _FALSE_;
ppt->has_source_theta_idr = _FALSE_;
ppt->has_source_theta_idm_dr = _FALSE_;
ppt->has_source_theta_ncdm = _FALSE_;
ppt->has_source_phi = _FALSE_;
ppt->has_source_phi_prime = _FALSE_;
ppt->has_source_phi_plus_psi = _FALSE_;
ppt->has_source_psi = _FALSE_;
ppt->has_source_h = _FALSE_;
ppt->has_source_h_prime = _FALSE_;
ppt->has_source_eta = _FALSE_;
ppt->has_source_eta_prime = _FALSE_;
ppt->has_source_H_T_Nb_prime = _FALSE_;
ppt->has_source_k2gamma_Nb = _FALSE_;
/** - source flags and indices, for sources that all modes have in
common (temperature, polarization, ...). For temperature, the
term t2 is always non-zero, while other terms are non-zero only
for scalars and vectors. For polarization, the term e is always
non-zero, while the term b is only for vectors and tensors. */
if (ppt->has_cl_cmb_temperature == _TRUE_) {
ppt->has_source_t = _TRUE_;
ppt->has_cmb = _TRUE_;
}
if (ppt->has_cl_cmb_polarization == _TRUE_) {
ppt->has_source_p = _TRUE_;
ppt->has_cmb = _TRUE_;
}
index_type = 0;
class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1);
class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1);
/* snapshot: every mode-specific index list below restarts from here */
index_type_common = index_type;
/* indices for perturbed recombination */
/* NOTE(review): these increment index_type AFTER index_type_common was
snapshotted, while each mode section below resets index_type back to
index_type_common; so when has_perturbed_recombination is true these
indices overlap the first mode-specific indices (e.g. index_tp_t0).
This matches the structure of upstream CLASS -- confirm intended. */
class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1);
class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1);
/** - define k values with perturb_get_k_list() */
class_call(perturb_get_k_list(ppr,
pba,
pth,
ppt),
ppt->error_message,
ppt->error_message);
/** - loop over modes. Initialize flags and indices which are specific to each mode. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
/** - (a) scalars */
/* _scalars_ presumably expands to (index_md == ppt->index_md_scalars)
-- macro defined elsewhere, TODO confirm */
if (_scalars_) {
/** - --> source flags and indices, for sources that are specific to scalars */
if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) {
ppt->has_lss = _TRUE_;
ppt->has_source_phi_plus_psi = _TRUE_;
}
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) {
ppt->has_lss = _TRUE_;
ppt->has_source_delta_m = _TRUE_;
/* with massive neutrinos also track the cdm+baryon density for P_cb */
if (pba->has_ncdm == _TRUE_){
ppt->has_source_delta_cb = _TRUE_;
}
}
if (ppt->has_density_transfers == _TRUE_) {
ppt->has_lss = _TRUE_;
ppt->has_source_delta_tot = _TRUE_;
ppt->has_source_delta_g = _TRUE_;
ppt->has_source_delta_b = _TRUE_;
if (pba->has_cdm == _TRUE_)
ppt->has_source_delta_cdm = _TRUE_;
/* GDM_CLASS */
if (pba->has_gdm == _TRUE_)
ppt->has_source_delta_gdm = _TRUE_;
/* END GDM_CLASS */
if (pba->has_dcdm == _TRUE_)
ppt->has_source_delta_dcdm = _TRUE_;
if (pba->has_fld == _TRUE_)
ppt->has_source_delta_fld = _TRUE_;
if (pba->has_scf == _TRUE_)
ppt->has_source_delta_scf = _TRUE_;
if (pba->has_ur == _TRUE_)
ppt->has_source_delta_ur = _TRUE_;
if (pba->has_idr == _TRUE_)
ppt->has_source_delta_idr = _TRUE_;
if (pba->has_idm_dr == _TRUE_)
ppt->has_source_delta_idm_dr = _TRUE_;
if (pba->has_dr == _TRUE_)
ppt->has_source_delta_dr = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->has_source_delta_ncdm = _TRUE_;
// Thanks to the following lines, (phi,psi) are also stored as sources
// (Obtained directly in newtonian gauge, inferred from (h,eta) in synchronous gauge).
// If density transfer functions are requested in the (default) CLASS format,
// (phi, psi) will be appended to the delta_i's in the final output.
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
}
if (ppt->has_velocity_transfers == _TRUE_) {
ppt->has_lss = _TRUE_;
ppt->has_source_theta_tot = _TRUE_;
ppt->has_source_theta_g = _TRUE_;
ppt->has_source_theta_b = _TRUE_;
/* in synchronous gauge theta_cdm vanishes by construction, so only
store it in other gauges */
if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))
ppt->has_source_theta_cdm = _TRUE_;
/* GDM_CLASS */
if (pba->has_gdm == _TRUE_)
ppt->has_source_theta_gdm = _TRUE_;
/* END GDM_CLASS */
if (pba->has_dcdm == _TRUE_)
ppt->has_source_theta_dcdm = _TRUE_;
if (pba->has_fld == _TRUE_)
ppt->has_source_theta_fld = _TRUE_;
if (pba->has_scf == _TRUE_)
ppt->has_source_theta_scf = _TRUE_;
if (pba->has_ur == _TRUE_)
ppt->has_source_theta_ur = _TRUE_;
if (pba->has_idr == _TRUE_)
ppt->has_source_theta_idr = _TRUE_;
if (pba->has_idm_dr == _TRUE_)
ppt->has_source_theta_idm_dr = _TRUE_;
if (pba->has_dr == _TRUE_)
ppt->has_source_theta_dr = _TRUE_;
if (pba->has_ncdm == _TRUE_)
ppt->has_source_theta_ncdm = _TRUE_;
}
if (ppt->has_cl_number_count == _TRUE_) {
ppt->has_lss = _TRUE_;
if (ppt->has_nc_density == _TRUE_) {
ppt->has_source_delta_m = _TRUE_;
}
if (ppt->has_nc_rsd == _TRUE_) {
ppt->has_source_theta_m = _TRUE_;
if (pba->has_ncdm == _TRUE_)
/* we may not need theta_cb at all, rsd always defined for
the total matter, but at least this is made
available */
ppt->has_source_theta_cb = _TRUE_;
}
if (ppt->has_nc_lens == _TRUE_) {
ppt->has_source_phi_plus_psi = _TRUE_;
}
if (ppt->has_nc_gr == _TRUE_) {
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
ppt->has_source_phi_prime = _TRUE_;
ppt->has_source_phi_plus_psi = _TRUE_;
}
}
/* metric potential transfers: store the gauge-appropriate metric
perturbations, plus the N-body gauge quantities in all gauges */
if ( ppt->has_metricpotential_transfers == _TRUE_ ) {
if (ppt->gauge == newtonian) {
ppt->has_source_phi = _TRUE_;
ppt->has_source_psi = _TRUE_;
ppt->has_source_phi_prime = _TRUE_;
}
if (ppt->gauge == synchronous) {
ppt->has_source_h = _TRUE_;
ppt->has_source_h_prime = _TRUE_;
ppt->has_source_eta = _TRUE_;
ppt->has_source_eta_prime = _TRUE_;
}
ppt->has_source_H_T_Nb_prime = _TRUE_;
ppt->has_source_k2gamma_Nb = _TRUE_;
}
if (ppt->has_Nbody_gauge_transfers == _TRUE_){
if (ppt->gauge == synchronous) {
ppt->has_source_h_prime = _TRUE_;
ppt->has_source_eta_prime = _TRUE_;
}
ppt->has_source_H_T_Nb_prime = _TRUE_;
/** gamma is not necessary for converting output to Nbody gauge but is included anyway. */
ppt->has_source_k2gamma_Nb = _TRUE_;
}
/* restart the index list after the common (all-mode) types */
index_type = index_type_common;
class_define_index(ppt->index_tp_t0, ppt->has_source_t, index_type,1);
class_define_index(ppt->index_tp_t1, ppt->has_source_t, index_type,1);
class_define_index(ppt->index_tp_delta_m, ppt->has_source_delta_m, index_type,1);
class_define_index(ppt->index_tp_delta_cb, ppt->has_source_delta_cb, index_type,1);
class_define_index(ppt->index_tp_delta_tot, ppt->has_source_delta_tot, index_type,1);
class_define_index(ppt->index_tp_delta_g, ppt->has_source_delta_g, index_type,1);
class_define_index(ppt->index_tp_delta_b, ppt->has_source_delta_b, index_type,1);
class_define_index(ppt->index_tp_delta_cdm, ppt->has_source_delta_cdm, index_type,1);
class_define_index(ppt->index_tp_delta_gdm, ppt->has_source_delta_gdm, index_type,1); // GDM_CLASS
class_define_index(ppt->index_tp_delta_dcdm, ppt->has_source_delta_dcdm,index_type,1);
class_define_index(ppt->index_tp_delta_fld, ppt->has_source_delta_fld, index_type,1);
class_define_index(ppt->index_tp_delta_scf, ppt->has_source_delta_scf, index_type,1);
class_define_index(ppt->index_tp_delta_dr, ppt->has_source_delta_dr, index_type,1);
class_define_index(ppt->index_tp_delta_ur, ppt->has_source_delta_ur, index_type,1);
class_define_index(ppt->index_tp_delta_idr, ppt->has_source_delta_idr, index_type,1);
class_define_index(ppt->index_tp_delta_idm_dr, ppt->has_source_delta_idm_dr, index_type,1);
/* ncdm gets one contiguous slot per species (N_ncdm of them) */
class_define_index(ppt->index_tp_delta_ncdm1,ppt->has_source_delta_ncdm,index_type,pba->N_ncdm);
class_define_index(ppt->index_tp_theta_m, ppt->has_source_theta_m, index_type,1);
class_define_index(ppt->index_tp_theta_cb, ppt->has_source_theta_cb, index_type,1);
class_define_index(ppt->index_tp_theta_tot, ppt->has_source_theta_tot, index_type,1);
class_define_index(ppt->index_tp_theta_g, ppt->has_source_theta_g, index_type,1);
class_define_index(ppt->index_tp_theta_b, ppt->has_source_theta_b, index_type,1);
class_define_index(ppt->index_tp_theta_cdm, ppt->has_source_theta_cdm, index_type,1);
class_define_index(ppt->index_tp_theta_gdm, ppt->has_source_theta_gdm, index_type,1); // GDM_CLASS
class_define_index(ppt->index_tp_theta_dcdm, ppt->has_source_theta_dcdm,index_type,1);
class_define_index(ppt->index_tp_theta_fld, ppt->has_source_theta_fld, index_type,1);
class_define_index(ppt->index_tp_theta_scf, ppt->has_source_theta_scf, index_type,1);
class_define_index(ppt->index_tp_theta_dr, ppt->has_source_theta_dr, index_type,1);
class_define_index(ppt->index_tp_theta_ur, ppt->has_source_theta_ur, index_type,1);
class_define_index(ppt->index_tp_theta_idr, ppt->has_source_theta_idr, index_type,1);
class_define_index(ppt->index_tp_theta_idm_dr, ppt->has_source_theta_idm_dr, index_type,1);
class_define_index(ppt->index_tp_theta_ncdm1,ppt->has_source_theta_ncdm,index_type,pba->N_ncdm);
class_define_index(ppt->index_tp_phi, ppt->has_source_phi, index_type,1);
class_define_index(ppt->index_tp_phi_prime, ppt->has_source_phi_prime, index_type,1);
class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1);
class_define_index(ppt->index_tp_psi, ppt->has_source_psi, index_type,1);
class_define_index(ppt->index_tp_h, ppt->has_source_h, index_type,1);
class_define_index(ppt->index_tp_h_prime, ppt->has_source_h_prime, index_type,1);
class_define_index(ppt->index_tp_eta, ppt->has_source_eta, index_type,1);
class_define_index(ppt->index_tp_eta_prime, ppt->has_source_eta_prime, index_type,1);
class_define_index(ppt->index_tp_H_T_Nb_prime,ppt->has_source_H_T_Nb_prime,index_type,1);
class_define_index(ppt->index_tp_k2gamma_Nb, ppt->has_source_k2gamma_Nb,index_type,1);
ppt->tp_size[index_md] = index_type;
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, polarization, lensing/gravitational potential, ...). Please adjust your input.");
/** - --> count scalar initial conditions (for scalars: ad, bi, cdi, nid, niv; for tensors: only one) and assign corresponding indices */
index_ic = 0;
class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1);
class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1);
class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1);
class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1);
class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1);
ppt->ic_size[index_md] = index_ic;
class_test(index_ic == 0,
ppt->error_message,
"you should have at least one adiabatic or isocurvature initial condition...} !!!");
}
/** - (b) vectors */
if (_vectors_) {
/** - --> source flags and indices, for sources that are specific to vectors */
index_type = index_type_common;
class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1);
ppt->tp_size[index_md] = index_type;
/*
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarization). Please adjust your input.");
*/
/** - --> initial conditions for vectors*/
index_ic = 0;
/* not coded yet */
ppt->ic_size[index_md] = index_ic;
}
/** - (c) tensors */
if (_tensors_) {
/** - --> source flags and indices, for sources that are specific to tensors */
index_type = index_type_common;
/* nothing specific, unlike for vectors and scalars! */
ppt->tp_size[index_md] = index_type;
/*
class_test(index_type == 0,
ppt->error_message,
"inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarization). Please adjust your input.");
*/
/** - --> only one initial condition for tensors*/
index_ic = 0;
class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1);
ppt->ic_size[index_md] = index_ic;
}
/** - (d) for each mode, allocate array of arrays of source functions for each initial conditions and wavenumber, (ppt->source[index_md])[index_ic][index_type] */
/* the 2D (ic,type) structure is flattened into one array indexed by
index_ic * tp_size + index_type; the innermost double arrays are
allocated later, once the time sampling (hence their size) is known */
class_alloc(ppt->sources[index_md],
ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
ppt->error_message);
class_alloc(ppt->late_sources[index_md],
ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
ppt->error_message);
class_alloc(ppt->ddlate_sources[index_md],
ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *),
ppt->error_message);
}
return _SUCCESS_;
}
/**
* Define time sampling for source functions.
*
* For each type, compute the list of values of tau at which sources
* will be sampled. Knowing the number of tau values, allocate all
* arrays of source functions.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: Initialized perturbation structure
* @return the error status
*/
int perturb_timesampling_for_sources(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
/** Summary: */
/** - define local variables */
/* counter: number of sampling points accumulated while stepping in tau */
int counter;
int index_md;
int index_tp;
int index_ic;
int index_tau;
/* last_index_back/thermo: interpolation cursors reused across closeby
calls for speed; first_index_back/thermo hold their starting values */
int last_index_back;
int last_index_thermo;
int first_index_back;
int first_index_thermo;
double tau;
/* tau_ini: first source sampling time, determined below */
double tau_ini;
/* tau_lower/tau_upper/tau_mid: bracketing values for the bisection */
double tau_lower;
double tau_upper;
double tau_mid;
/* timescale_source: local characteristic time; the sampling step is
ppr->perturb_sampling_stepsize times this */
double timescale_source;
double rate_thermo;
double rate_isw_squared;
double a_prime_over_a;
double a_primeprime_over_a;
/* scratch vectors filled by background_at_tau / thermodynamics_at_z */
double * pvecback;
double * pvecthermo;
/** - allocate background/thermodynamics vectors */
class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message);
class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
/** - first, just count the number of sampling points in order to allocate the array containing all values */
/** - (a) if CMB requested, first sampling point = when the universe
stops being opaque; otherwise, start sampling gravitational
potential at recombination [however, if perturbed recombination
is requested, we also need to start the system before
recombination. Otherwise, the initial conditions for gas
temperature and ionization fraction perturbations (delta_T = 1/3
delta_b, delta_x_e) are not valid]. */
if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) {
/* using bisection, search time tau such that the ratio of thermo
to Hubble time scales tau_c/tau_h=aH/kappa' is equal to
start_sources_at_tau_c_over_tau_h */
tau_lower = pth->tau_ini;
class_call(background_at_tau(pba,
tau_lower,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
/* check that the lower bracket is early enough (ratio below target) */
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n",
tau_lower);
tau_upper = pth->tau_rec;
class_call(background_at_tau(pba,
tau_upper,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
/* check that the upper bracket is late enough (ratio above target) */
class_test(pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] <
ppr->start_sources_at_tau_c_over_tau_h,
ppt->error_message,
"your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. You should decrease 'start_sources_at_tau_c_over_tau_h'\n");
/* standard bisection on the monotonic ratio aH/kappa' */
tau_mid = 0.5*(tau_lower + tau_upper);
while (tau_upper - tau_lower > ppr->tol_tau_approx) {
class_call(background_at_tau(pba,
tau_mid,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (pvecback[pba->index_bg_a]*
pvecback[pba->index_bg_H]/
pvecthermo[pth->index_th_dkappa] >
ppr->start_sources_at_tau_c_over_tau_h)
tau_upper = tau_mid;
else
tau_lower = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
tau_ini = tau_mid;
}
else {
/* check the time corresponding to the highest redshift requested in output plus one */
class_call(background_tau_of_z(pba,
ppt->z_max_pk+1,
&tau_ini),
pba->error_message,
ppt->error_message);
/* obsolete: previous choice was to start always at recombination time */
/* tau_ini = pth->tau_rec; */
/* set values of first_index_back/thermo */
class_call(background_at_tau(pba,
tau_ini,
pba->short_info,
pba->inter_normal,
&first_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&first_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
}
/** - (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where:
- --> if CMB requested:
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect); and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
- --> if CMB not requested:
timescale_source = 1/aH; repeat till today.
*/
/* pass 1: only COUNT the sampling points (counter starts at 1 because
the first point, tau_ini, is already accounted for) */
counter = 1;
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
/* variation rate given by Hubble time */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
}
/** - --> infer total number of time steps, ppt->tau_size */
ppt->tau_size = counter;
/** - --> allocate array of time steps, ppt->tau_sampling[index_tau] */
class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message);
/** - --> repeat the same steps, now filling the array with each tau value: */
/** - --> (b.1.) first sampling point = when the universe stops being opaque */
/* pass 2: identical stepping as pass 1 (same cursors, same formulas),
this time storing each tau; counter now starts at 0 as a fill index */
counter = 0;
ppt->tau_sampling[counter]=tau_ini;
/** - --> (b.2.) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where
timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$;
timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect); and
timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today.
If CMB not requested:
timescale_source = 1/aH; repeat till today. */
last_index_back = first_index_back;
last_index_thermo = first_index_thermo;
tau = tau_ini;
while (tau < pba->conformal_age) {
class_call(background_at_tau(pba,
tau,
pba->short_info,
pba->inter_closeby,
&last_index_back,
pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&last_index_thermo,
pvecback,
pvecthermo),
pth->error_message,
ppt->error_message);
if (ppt->has_cmb == _TRUE_) {
/* variation rate of thermodynamics variables */
rate_thermo = pvecthermo[pth->index_th_rate];
/* variation rate of metric due to late ISW effect (important at late times) */
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a]
+ 2. * a_prime_over_a * a_prime_over_a;
rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a);
/* compute rate */
timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared);
}
else {
a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a];
timescale_source = a_prime_over_a;
}
/* check it is non-zero */
class_test(timescale_source == 0.,
ppt->error_message,
"null evolution rate, integration is diverging");
/* compute inverse rate */
timescale_source = 1./timescale_source;
class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation,
ppt->error_message,
"integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source);
tau = tau + ppr->perturb_sampling_stepsize*timescale_source;
counter++;
ppt->tau_sampling[counter]=tau;
}
/** - last sampling point = exactly today */
/* overwrite the last stored value (which stepped past conformal_age) */
ppt->tau_sampling[counter] = pba->conformal_age;
free(pvecback);
free(pvecthermo);
/** - check the maximum redshift z_max_pk at which the Fourier
transfer functions \f$ T_i(k,z)\f$ should be computable by
interpolation. If it is equal to zero, only \f$ T_i(k,z=0)\f$
needs to be computed. If it is higher, we will store a table of
log(tau) in the relevant time range, generously encompassing the
range 0<z<z_max_pk, and used for the interpolation of sources */
/* if z_max_pk<0, return error */
class_test(ppt->z_max_pk < 0,
ppt->error_message,
"asked for negative redshift z=%e",ppt->z_max_pk);
/* if z_max_pk=0, there is just one value to store */
if (ppt->z_max_pk == 0.) {
ppt->ln_tau_size=1;
}
/* if z_max_pk>0, store several values (with a comfortable margin above z_max_pk) in view of interpolation */
else{
/* find the first relevant value of tau (last value in the table tau_sampling before tau(z_max)) and infer the number of values of tau at which P(k) must be stored */
class_call(background_tau_of_z(pba,ppt->z_max_pk,&tau_lower),
pba->error_message,
ppt->error_message);
index_tau=0;
class_test((tau_lower <= ppt->tau_sampling[index_tau]),
ppt->error_message,
"you asked for zmax=%e, i.e. taumin=%e, smaller than or equal to the first possible value =%e; it should be strictly bigger for a successfull interpolation",ppt->z_max_pk,tau_lower,ppt->tau_sampling[0]);
/* advance to the first sample at or beyond tau(z_max), then step one
back so the stored range starts just before it */
while (ppt->tau_sampling[index_tau] < tau_lower){
index_tau++;
}
index_tau --;
class_test(index_tau<0,
ppt->error_message,
"by construction, this should never happen, a bug must have been introduced somewhere");
/* whenever possible, take a few more values to avoid boundary effects in the interpolation */
if (index_tau>0) index_tau--;
if (index_tau>0) index_tau--;
if (index_tau>0) index_tau--;
if (index_tau>0) index_tau--;
ppt->ln_tau_size=ppt->tau_size-index_tau;
/* allocate and fill array of log(tau) */
/* ln_tau[i] = log of the i-th of the LAST ln_tau_size entries of
tau_sampling (offset tau_size - ln_tau_size) */
class_alloc(ppt->ln_tau,ppt->ln_tau_size * sizeof(double),ppt->error_message);
for (index_tau=0; index_tau<ppt->ln_tau_size; index_tau++) {
ppt->ln_tau[index_tau]=log(ppt->tau_sampling[index_tau-ppt->ln_tau_size+ppt->tau_size]);
}
}
/** - loop over modes, initial conditions and types. For each of
them, allocate array of source functions. */
for (index_md = 0; index_md < ppt->md_size; index_md++) {
for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) {
for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) {
class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_tp],
ppt->k_size[index_md] * ppt->tau_size * sizeof(double),
ppt->error_message);
if (ppt->ln_tau_size > 1) {
/* late_sources is just a pointer to the end of sources (starting from the relevant time index) */
/* NOTE: being an alias into sources, it must NOT be freed per
element -- only ddlate_sources owns its own memory here */
ppt->late_sources[index_md][index_ic*ppt->tp_size[index_md]+index_tp] = &(ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + index_tp]
[(ppt->tau_size-ppt->ln_tau_size) * ppt->k_size[index_md]]);
class_alloc(ppt->ddlate_sources[index_md][index_ic*ppt->tp_size[index_md]+index_tp],
ppt->k_size[index_md] * ppt->ln_tau_size * sizeof(double),
ppt->error_message);
}
}
}
}
return _SUCCESS_;
}
/**
* Define the number of comoving wavenumbers using the information
* passed in the precision structure.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input/Output: pointer to perturbation structure (k lists and k-size arrays are allocated and filled here)
* @return the error status
*/
int perturb_get_k_list(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt
) {
int index_k, index_k_output, index_mode;
double k,k_min=0.,k_rec,step,tau1;
double * k_max_cmb;
double * k_max_cl;
double k_max=0.;
double scale2;
double *tmp_k_list;
int newk_size, index_newk, add_k_output_value;
/** Summary: */
class_test(ppr->k_step_transition == 0.,
ppt->error_message,
"stop to avoid division by zero");
class_test(pth->rs_rec == 0.,
ppt->error_message,
"stop to avoid division by zero");
/** - allocate arrays related to k list for each mode */
class_alloc(ppt->k_size_cmb,
ppt->md_size*sizeof(int),
ppt->error_message);
class_alloc(ppt->k_size_cl,
ppt->md_size*sizeof(int),
ppt->error_message);
class_alloc(ppt->k_size,
ppt->md_size*sizeof(int),
ppt->error_message);
class_alloc(ppt->k,
ppt->md_size*sizeof(double*),
ppt->error_message);
class_calloc(k_max_cmb,
ppt->md_size,
sizeof(double),
ppt->error_message);
class_calloc(k_max_cl,
ppt->md_size,
sizeof(double),
ppt->error_message);
/** - scalar modes */
if (ppt->has_scalars == _TRUE_) {
/* first value */
if (pba->sgnK == 0) {
/* K<0 (flat) : start close to zero */
k_min=ppr->k_min_tau0/pba->conformal_age;
}
else if (pba->sgnK == -1) {
/* K<0 (open) : start close to sqrt(-K)
(in transfer modules, for scalars, this will correspond to q close to zero;
for vectors and tensors, this value is even smaller than the minimum necessary value) */
k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
}
else if (pba->sgnK == 1) {
/* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
k_min = sqrt((8.-1.e-4)*pba->K);
}
/** - --> find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */
k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */
k_max_cmb[ppt->index_md_scalars] = k_min;
k_max_cl[ppt->index_md_scalars] = k_min;
k_max = k_min;
if (ppt->has_cls == _TRUE_) {
/* find k_max_cmb[ppt->index_md_scalars] : */
/* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last
scattering surface seen today under an angle smaller than
pi/lmax: this is equivalent to
k_max_cl[ppt->index_md_scalars]*[comvoving.ang.diameter.distance] > l_max */
k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max
/pba->conformal_age/pth->angular_rescaling;
k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars];
k_max = k_max_cmb[ppt->index_md_scalars];
/* find k_max_cl[ppt->index_md_scalars] : */
/* if we need density/lensing Cl's, we must impose a stronger condition,
such that the minimum wavelength on the shell corresponding
to the center of smallest redshift bin is seen under an
angle smaller than pi/lmax. So we must multiply our previous
k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest
redshift bin]). Note that we could do the same with the
lensing potential if we needed a very precise C_l^phi-phi at
large l. We don't do it by default, because the lensed ClT,
ClE would be marginally affected. */
if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) {
class_call(background_tau_of_z(pba,
ppt->selection_mean[0],
&tau1),
pba->error_message,
ppt->error_message);
k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redshift instead of comoving radius: would implement corrections depending on curvature
k_max = k_max_cl[ppt->index_md_scalars];
}
}
/* find k_max: */
if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m == _TRUE_))
k_max = MAX(k_max,ppt->k_max_for_pk);
/** - --> test that result for k_min, k_max make sense */
class_test(k_min<0.,
ppt->error_message,
"buggy definition of k_min");
class_test(k_max<0.,
ppt->error_message,
"buggy definition of k_max");
class_test(k_max<k_min,
ppt->error_message,
"buggy definition of k_min and/or k_max");
/* if K>0, the transfer function will be calculated for discrete
integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
m=0,1,2 for scalars/vectors/tensors. However we are free to
define in the perturbation module some arbitrary values of k:
later on, the transfer module will interpolate at values of k
corresponding exactly to integer values of nu. Hence, apart
from the value of k_min and the step size in the vicinity of
k_min, we define exactly the same sampling in the three cases
K=0, K<0, K>0 */
/* allocate array with, for the moment, the largest possible size */
/* the following is a boost on k_per_decade_for_pk for the interacting idm-idr cases (relevant for large k and a_idm_dr) */
if((pba->has_idm_dr==_TRUE_)&&(pth->nindex_idm_dr>=2)){
class_alloc(ppt->k[ppt->index_md_scalars],
((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+
(int)(MAX(ppr->k_per_decade_for_pk*ppr->idmdr_boost_k_per_decade_for_pk*pth->nindex_idm_dr,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3)
*sizeof(double),ppt->error_message);
}
else{
class_alloc(ppt->k[ppt->index_md_scalars],
((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+
(int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3)
*sizeof(double),ppt->error_message);
}
/* first value */
index_k=0;
k = k_min;
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
/* values until k_max_cmb[ppt->index_md_scalars] */
while (k < k_max_cmb[ppt->index_md_scalars]) {
/* the linear step is not constant, it has a step-like shape,
centered around the characteristic scale set by the sound
horizon at recombination (associated to the comoving wavenumber
k_rec) */
step = (ppr->k_step_super
+ 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
* (ppr->k_step_sub-ppr->k_step_super)) * k_rec;
/* there is one other thing to take into account in the step
size. There are two other characteristic scales that matter for
the sampling: the Hubble scale today, k0=a0H0, and eventually
curvature scale sqrt(|K|). We define "scale2" as the sum of the
squared Hubble radius and squared curvature radius. We need to
increase the sampling for k<sqrt(scale2), in order to get the
first multipoles accurate enough. The formula below reduces it
gradually in the k-->0 limit, by up to a factor 10. The actual
stepsize is still fixed by k_step_super, this is just a
reduction factor. */
scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);
step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);
class_test(step / k < ppr->smallest_allowed_variation,
ppt->error_message,
"k step =%e < machine precision : leads either to numerical error or infinite loop",
step * k_rec);
k += step;
class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1],
ppt->error_message,
"consecutive values of k should differ and should be in growing order");
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
}
ppt->k_size_cmb[ppt->index_md_scalars] = index_k;
/* values until k_max_cl[ppt->index_md_scalars] */
while (k < k_max_cl[ppt->index_md_scalars]) {
k *= pow(10.,1./(ppr->k_per_decade_for_pk
+(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
*(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
}
ppt->k_size_cl[ppt->index_md_scalars] = index_k;
/* values until k_max */
while (k < k_max) {
if((pba->has_idm_dr==_TRUE_)&&(pth->nindex_idm_dr>=2)){
k *= pow(10.,1./(ppr->k_per_decade_for_pk*ppr->idmdr_boost_k_per_decade_for_pk*pth->nindex_idm_dr
+(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk*ppr->idmdr_boost_k_per_decade_for_pk*pth->nindex_idm_dr)
*(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));
}
else{
k *= pow(10.,1./(ppr->k_per_decade_for_pk
+(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk)
*(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4)))));
}
ppt->k[ppt->index_md_scalars][index_k] = k;
index_k++;
}
ppt->k_size[ppt->index_md_scalars] = index_k;
class_realloc(ppt->k[ppt->index_md_scalars],
ppt->k[ppt->index_md_scalars],
ppt->k_size[ppt->index_md_scalars]*sizeof(double),
ppt->error_message);
}
/** - vector modes */
if (ppt->has_vectors == _TRUE_) {
/* first value */
if (pba->sgnK == 0) {
/* K=0 (flat) : start close to zero */
k_min=ppr->k_min_tau0/pba->conformal_age;
}
else if (pba->sgnK == -1) {
/* K<0 (open) : start close to sqrt(-K)
(in transfer modules, for scalars, this will correspond to q close to zero;
for vectors and tensors, this value is even smaller than the minimum necessary value) */
k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
}
else if (pba->sgnK == 1) {
/* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
k_min = sqrt((7.-1.e-4)*pba->K);
}
/** - --> find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */
k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */
k_max_cmb[ppt->index_md_vectors] = k_min;
k_max_cl[ppt->index_md_vectors] = k_min;
k_max = k_min;
if (ppt->has_cls == _TRUE_) {
/* find k_max_cmb: */
/* choose a k_max_cmb corresponding to a wavelength on the last
scattering surface seen today under an angle smaller than
pi/lmax: this is equivalent to
k_max_cl*[comoving.ang.diameter.distance] > l_max */
k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max
/pba->conformal_age/pth->angular_rescaling;
k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors];
k_max = k_max_cmb[ppt->index_md_vectors];
}
/** - --> test that result for k_min, k_max make sense */
class_test(k_min<0.,
ppt->error_message,
"buggy definition of k_min");
class_test(k_max<0.,
ppt->error_message,
"buggy definition of k_max");
class_test(k_max<k_min,
ppt->error_message,
"buggy definition of k_min and/or k_max");
/* if K>0, the transfer function will be calculated for discrete
integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
m=0,1,2 for scalars/vectors/tensors. However we are free to
define in the perturbation module some arbitrary values of k:
later on, the transfer module will interpolate at values of k
corresponding exactly to integer values of nu. Hence, apart
from the value of k_min and the step size in the vicinity of
k_min, we define exactly the same sampling in the three cases
K=0, K<0, K>0 */
/* allocate array with, for the moment, the largest possible size */
class_alloc(ppt->k[ppt->index_md_vectors],
((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
*sizeof(double),ppt->error_message);
/* first value */
index_k=0;
k = k_min;
ppt->k[ppt->index_md_vectors][index_k] = k;
index_k++;
/* values until k_max_cmb[ppt->index_md_vectors] */
while (k < k_max_cmb[ppt->index_md_vectors]) {
/* the linear step is not constant, it has a step-like shape,
centered around the characteristic scale set by the sound
horizon at recombination (associated to the comoving wavenumber
k_rec) */
step = (ppr->k_step_super
+ 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
* (ppr->k_step_sub-ppr->k_step_super)) * k_rec;
/* there is one other thing to take into account in the step
size. There are two other characteristic scales that matter for
the sampling: the Hubble scale today, k0=a0H0, and eventually
curvature scale sqrt(|K|). We define "scale2" as the sum of the
squared Hubble radius and squared curvature radius. We need to
increase the sampling for k<sqrt(scale2), in order to get the
first multipoles accurate enough. The formula below reduces it
gradually in the k-->0 limit, by up to a factor 10. The actual
stepsize is still fixed by k_step_super, this is just a
reduction factor. */
scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);
step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);
class_test(step / k < ppr->smallest_allowed_variation,
ppt->error_message,
"k step =%e < machine precision : leads either to numerical error or infinite loop",
step * k_rec);
k += step;
class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1],
ppt->error_message,
"consecutive values of k should differ and should be in growing order");
ppt->k[ppt->index_md_vectors][index_k] = k;
index_k++;
}
ppt->k_size_cmb[ppt->index_md_vectors] = index_k;
ppt->k_size_cl[ppt->index_md_vectors] = index_k;
ppt->k_size[ppt->index_md_vectors] = index_k;
class_realloc(ppt->k[ppt->index_md_vectors],
ppt->k[ppt->index_md_vectors],
ppt->k_size[ppt->index_md_vectors]*sizeof(double),
ppt->error_message);
}
/** - tensor modes */
if (ppt->has_tensors == _TRUE_) {
/* first value */
if (pba->sgnK == 0) {
/* K=0 (flat) : start close to zero */
k_min=ppr->k_min_tau0/pba->conformal_age;
}
else if (pba->sgnK == -1) {
/* K<0 (open) : start close to sqrt(-K)
(in transfer modules, for scalars, this will correspond to q close to zero;
for vectors and tensors, this value is even smaller than the minimum necessary value) */
k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2));
}
else if (pba->sgnK == 1) {
/* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */
k_min = sqrt((6.-1.e-4)*pba->K);
}
/** - --> find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */
k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */
k_max_cmb[ppt->index_md_tensors] = k_min;
k_max_cl[ppt->index_md_tensors] = k_min;
k_max = k_min;
if (ppt->has_cls == _TRUE_) {
/* find k_max_cmb[ppt->index_md_tensors]: */
/* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last
scattering surface seen today under an angle smaller than
pi/lmax: this is equivalent to
k_max_cl[ppt->index_md_tensors]*[comoving.ang.diameter.distance] > l_max */
k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max
/pba->conformal_age/pth->angular_rescaling;
k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors];
k_max = k_max_cmb[ppt->index_md_tensors];
}
/** - --> test that result for k_min, k_max make sense */
class_test(k_min<0.,
ppt->error_message,
"buggy definition of k_min");
class_test(k_max<0.,
ppt->error_message,
"buggy definition of k_max");
class_test(k_max<k_min,
ppt->error_message,
"buggy definition of k_min and/or k_max");
/* if K>0, the transfer function will be calculated for discrete
integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and
m=0,1,2 for scalars/vectors/tensors. However we are free to
define in the perturbation module some arbitrary values of k:
later on, the transfer module will interpolate at values of k
corresponding exactly to integer values of nu. Hence, apart
from the value of k_min and the step size in the vicinity of
k_min, we define exactly the same sampling in the three cases
K=0, K<0, K>0 */
/* allocate array with, for the moment, the largest possible size */
class_alloc(ppt->k[ppt->index_md_tensors],
((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1)
*sizeof(double),ppt->error_message);
/* first value */
index_k=0;
k = k_min;
ppt->k[ppt->index_md_tensors][index_k] = k;
index_k++;
/* values until k_max_cmb[ppt->index_md_tensors] */
while (k < k_max_cmb[ppt->index_md_tensors]) {
/* the linear step is not constant, it has a step-like shape,
centered around the characteristic scale set by the sound
horizon at recombination (associated to the comoving wavenumber
k_rec) */
step = (ppr->k_step_super
+ 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.)
* (ppr->k_step_sub-ppr->k_step_super)) * k_rec;
/* there is one other thing to take into account in the step
size. There are two other characteristic scales that matter for
the sampling: the Hubble scale today, k0=a0H0, and eventually
curvature scale sqrt(|K|). We define "scale2" as the sum of the
squared Hubble radius and squared curvature radius. We need to
increase the sampling for k<sqrt(scale2), in order to get the
first multipoles accurate enough. The formula below reduces it
gradually in the k-->0 limit, by up to a factor 10. The actual
stepsize is still fixed by k_step_super, this is just a
reduction factor. */
scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K);
step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction);
class_test(step / k < ppr->smallest_allowed_variation,
ppt->error_message,
"k step =%e < machine precision : leads either to numerical error or infinite loop",
step * k_rec);
k += step;
class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1],
ppt->error_message,
"consecutive values of k should differ and should be in growing order");
ppt->k[ppt->index_md_tensors][index_k] = k;
index_k++;
}
ppt->k_size_cmb[ppt->index_md_tensors] = index_k;
ppt->k_size_cl[ppt->index_md_tensors] = index_k;
ppt->k_size[ppt->index_md_tensors] = index_k;
class_realloc(ppt->k[ppt->index_md_tensors],
ppt->k[ppt->index_md_tensors],
ppt->k_size[ppt->index_md_tensors]*sizeof(double),
ppt->error_message);
}
/** - If user asked for k_output_values, add those to all k lists: */
if (ppt->k_output_values_num > 0) {
/* Allocate storage */
class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message);
/** - --> Find indices in ppt->k[index_md] corresponding to 'k_output_values'.
We are assuming that ppt->k is sorted and growing, and we have made sure
that ppt->k_output_values is also sorted and growing.*/
for (index_mode=0; index_mode<ppt->md_size; index_mode++){
newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num;
class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message);
index_k=0;
index_k_output=0;
for (index_newk=0; index_newk<newk_size; index_newk++){
/** - --> Decide if we should add k_output_value now. This has to be this complicated, since we
can only compare the k-values when both indices are in range.*/
if (index_k >= ppt->k_size[index_mode])
add_k_output_value = _TRUE_;
else if (index_k_output >= ppt->k_output_values_num)
add_k_output_value = _FALSE_;
else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k])
add_k_output_value = _TRUE_;
else
add_k_output_value = _FALSE_;
if (add_k_output_value == _TRUE_){
tmp_k_list[index_newk] = ppt->k_output_values[index_k_output];
ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk;
index_k_output++;
}
else{
tmp_k_list[index_newk] = ppt->k[index_mode][index_k];
index_k++;
}
}
free(ppt->k[index_mode]);
ppt->k[index_mode] = tmp_k_list;
ppt->k_size[index_mode] = newk_size;
index_k = newk_size-1;
while (ppt->k[index_mode][index_k] > k_max_cl[index_mode])
index_k--;
ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);
index_k = newk_size-1;
while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode])
index_k--;
ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]);
/** - --> The two MIN statements are here because in a normal run, the cl and cmb
arrays contain a single k value larger than their respective k_max.
We are mimicking this behavior. */
}
}
/* For testing, can be useful to print the k list in a file:
FILE * out=fopen("output/k","w");
for (index_k=0; index_k < ppt->k_size[0]; index_k++) {
fprintf(out,"%e\n",ppt->k[0][index_k],pba->K);
}
fclose(out);
*/
/** - finally, find the global k_min and k_max for the ensemble of all modes (scalars, vectors, tensors) */
ppt->k_min = _HUGE_;
ppt->k_max = 0.;
if (ppt->has_scalars == _TRUE_) {
ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */
ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */
}
if (ppt->has_vectors == _TRUE_) {
ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */
ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */
}
if (ppt->has_tensors == _TRUE_) {
ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */
ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */
}
free(k_max_cmb);
free(k_max_cl);
return _SUCCESS_;
}
/**
* Initialize a perturb_workspace structure. All fields are allocated
* here, with the exception of the perturb_vector '-->pv' field, which
* is allocated separately in perturb_vector_init. We allocate one
* such perturb_workspace structure per thread and per mode
* (scalar/../tensor). Then, for each thread, all initial conditions
* and wavenumbers will use the same workspace.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param ppw        Input/Output: pointer to perturb_workspace structure whose fields are allocated or filled here
* @return the error status
*/
int perturb_workspace_init(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: allocate and initialize every field of one perturb_workspace
      (used by one thread for one mode), except the '-->pv' perturbation
      vector which is allocated separately in perturb_vector_init(). */

  /** - define local variables */

  int index_mt=0;   /* running index used to lay out metric (and other constrained) perturbations */
  int index_ap;     /* running index used to lay out approximation flags */
  int l;            /* multipole index, for initializing the s_l array */

  /** - Compute maximum l_max for any multipole */;

  if (_scalars_) {
    /* start from the photon temperature/polarization hierarchies, then
       fold in the truncation multipole of every other species that has
       its own Boltzmann hierarchy */
    ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g);
    if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
    if ((pba->has_idr == _TRUE_) && (ppt->idr_nature == idr_free_streaming)) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_idr);
    if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
    if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr);
  }

  if (_tensors_) {
    ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten);
    if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur);
    if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm);
  }

  /* NOTE(review): for vector modes neither branch above assigns
     ppw->max_l_max, so the allocation below would use whatever value the
     field already holds — confirm vectors are covered elsewhere or that
     this is intentional. */

  /** - Allocate \f$ s_l\f$[ ] array for freestreaming of multipoles (see arXiv:1305.3261) and initialize
      to 1.0, which is the K=0 value. (For curved models the values are
      overwritten per wavenumber in perturb_solve.) */
  class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message);
  for (l=0; l<=ppw->max_l_max; l++){
    ppw->s_l[l] = 1.0;
  }

  /** - define indices of metric perturbations obeying constraint
      equations (this can be done once and for all, because the
      vector of metric perturbations is the same whatever the
      approximation scheme, unlike the vector of quantities to
      be integrated, which is allocated separately in
      perturb_vector_init) */

  if (_scalars_) {

    /* GDM_CLASS: algebraic fluid shear. Although mt stands for metric, */
    /* this is a natural place for all constrained perturbed quantities */
    if ((pba->has_gdm == _TRUE_) && (ppt->dynamic_shear_gdm == _FALSE_)) {
      class_define_index(ppw->index_mt_shear_gdm,_TRUE_,index_mt,1);
    }

    /* newtonian gauge */

    if (ppt->gauge == newtonian) {
      class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1);       /* psi */
      class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */
    }

    /* synchronous gauge (note that eta is counted in the vector of
       quantities to be integrated, while here we only consider
       quantities obeying to constraint equations) */

    if (ppt->gauge == synchronous) {
      class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1);       /* h' */
      class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */
      class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1);     /* eta' */
      class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1);         /* alpha = (h' + 6 eta') / (2 k**2); original comment said tau', but eta is the synchronous-gauge variable integrated here — tau is conformal time */
      class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1);   /* alpha' */

    }

  }

  if (_vectors_) {

    /* newtonian gauge */
    if (ppt->gauge == newtonian) {
      class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1);
    }

    if (ppt->gauge == synchronous) {
      class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1);
    }

  }

  if (_tensors_) {
    class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1);
  }

  /* total number of metric/constrained perturbations for this mode */
  ppw->mt_size = index_mt;

  /** - allocate some workspace in which we will store temporarily the
      values of background, thermodynamics, metric and source
      quantities at a given time */

  class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message);
  class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message);
  class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message);

  /** - count number of approximations, initialize their indices, and allocate their flags */
  index_ap=0;

  /* these two approximation flags exist for every mode */
  class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1);
  class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1);

  if (_scalars_) {

    /* flags below are defined only when the corresponding species exists */
    class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1);
    class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1);
    class_define_index(ppw->index_ap_tca_idm_dr,pba->has_idm_dr,index_ap,1);
    class_define_index(ppw->index_ap_rsa_idr,pba->has_idr,index_ap,1);
  }

  ppw->ap_size=index_ap;

  if (ppw->ap_size > 0)
    class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message);

  /** - For definiteness, initialize approximation flags to arbitrary
      values (correct values are overwritten in
      perturb_find_approximation_switches) */

  if (_scalars_) {

    ppw->approx[ppw->index_ap_tca]=(int)tca_on;
    ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
    if (pba->has_idr == _TRUE_)
      ppw->approx[ppw->index_ap_rsa_idr]=(int)rsa_idr_off;
    if (pba->has_idm_dr == _TRUE_)
      ppw->approx[ppw->index_ap_tca_idm_dr]=(int)tca_idm_dr_on;
    if (pba->has_ur == _TRUE_) {
      ppw->approx[ppw->index_ap_ufa]=(int)ufa_off;
    }
    if (pba->has_ncdm == _TRUE_) {
      ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off;
    }
  }

  if (_tensors_) {

    ppw->approx[ppw->index_ap_tca]=(int)tca_on;
    ppw->approx[ppw->index_ap_rsa]=(int)rsa_off;
  }

  /** - allocate fields where some of the perturbations are stored */

  if (_scalars_) {

    if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {

      /* one slot per non-cold dark matter species */
      class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
      class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
      class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message);
    }
  }

  return _SUCCESS_;
}
/**
 * Free the perturb_workspace structure (with the exception of the
 * perturb_vector '-->pv' field, which is freed separately in
 * perturb_vector_free).
 *
 * Releases every field allocated in perturb_workspace_init(), then the
 * workspace itself. Conditional fields are freed only when the same
 * conditions that triggered their allocation hold.
 *
 * @param ppt      Input: pointer to the perturbation structure
 * @param index_md Input: index of mode under consideration (scalar/.../tensor)
 * @param ppw      Input: pointer to perturb_workspace structure to be freed
 * @return the error status
 */

int perturb_workspace_free (
                            struct perturbs * ppt,
                            int index_md,
                            struct perturb_workspace * ppw
                            ) {

  /* per-species ncdm storage exists only for scalar modes with
     density/velocity transfer functions or delta_m sources requested */
  if (_scalars_) {
    if ((ppt->has_density_transfers == _TRUE_) ||
        (ppt->has_velocity_transfers == _TRUE_) ||
        (ppt->has_source_delta_m == _TRUE_)) {
      free(ppw->delta_ncdm);
      free(ppw->theta_ncdm);
      free(ppw->shear_ncdm);
    }
  }

  /* approximation flags were allocated only when at least one scheme exists;
     the pointer is left unset otherwise, so the guard is required */
  if (ppw->ap_size > 0)
    free(ppw->approx);

  /* temporary background / thermodynamics / metric vectors */
  free(ppw->pvecmetric);
  free(ppw->pvecthermo);
  free(ppw->pvecback);

  /* free-streaming coefficients */
  free(ppw->s_l);

  /* finally, the workspace structure itself */
  free(ppw);

  return _SUCCESS_;
}
/**
* Solve the perturbation evolution for a given mode, initial
* condition and wavenumber, and compute the corresponding source
* functions.
*
* For a given mode, initial condition and wavenumber, this function
* finds the time ranges over which the perturbations can be described
* within a given approximation. For each such range, it initializes
* (or redistributes) perturbations using perturb_vector_init(), and
* integrates over time. Whenever a "source sampling time" is passed,
* the source terms are computed and stored in the source table using
* perturb_sources().
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here)
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param index_k Input: index of wavenumber
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @return the error status
*/
int perturb_solve(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
int index_k,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
/* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */
struct perturb_parameters_and_workspace ppaw;
/* conformal time */
double tau,tau_lower,tau_upper,tau_mid;
/* multipole */
int l;
/* index running over time */
int index_tau;
/* number of values in the tau_sampling array that should be considered for a given mode */
int tau_actual_size;
/* running index over types (temperature, etc) */
int index_tp;
/* Fourier mode */
double k;
/* number of time intervals where the approximation scheme is uniform */
int interval_number;
/* index running over such time intervals */
int index_interval;
/* number of time intervals where each particular approximation is uniform */
int * interval_number_of;
/* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */
double * interval_limit;
/* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */
int ** interval_approx;
/* index running over approximations */
int index_ap;
/* approximation scheme within previous interval: previous_approx[index_ap] */
int * previous_approx;
int n_ncdm,is_early_enough;
/* function pointer to ODE evolver and names of possible evolvers */
extern int evolver_rk();
extern int evolver_ndf15();
int (*generic_evolver)();
/* Related to the perturbation output */
int (*perhaps_print_variables)();
int index_ikout;
/** - initialize indices relevant for back/thermo tables search */
ppw->last_index_back=0;
ppw->last_index_thermo=0;
ppw->inter_mode = pba->inter_normal;
/** - get wavenumber value */
k = ppt->k[index_md][index_k];
class_test(k == 0.,
ppt->error_message,
"stop to avoid division by zero");
/** - If non-zero curvature, update array of free-streaming coefficients ppw->s_l */
if (pba->has_curvature == _TRUE_){
for (l = 0; l<=ppw->max_l_max; l++){
ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.));
}
}
/** - maximum value of tau for which sources are calculated for this wavenumber */
/* by default, today */
tau_actual_size = ppt->tau_size;
/** - using bisection, compute minimum value of tau for which this
wavenumber is integrated */
/* will be at least the first time in the background table */
tau_lower = pba->tau_table[0];
class_call(background_at_tau(pba,
tau_lower,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1.,
pth->inter_normal,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
/* check that this initial time is indeed OK given imposed
conditions on kappa' and on k/aH */
class_test(ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa] >
ppr->start_small_k_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa]);
class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
ppr->start_large_k_at_tau_h_over_tau_k,
ppt->error_message,
"your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n",
ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]);
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w,
ppt->error_message,
"your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n",
n_ncdm,
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm],
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm],
ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]);
}
}
/* is at most the time at which sources must be sampled */
tau_upper = ppt->tau_sampling[0];
/* start bisection */
tau_mid = 0.5*(tau_lower + tau_upper);
while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) {
is_early_enough = _TRUE_;
class_call(background_at_tau(pba,
tau_mid,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
/* if there are non-cold relics, check that they are relativistic enough */
if (pba->has_ncdm == _TRUE_) {
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) > ppr->tol_ncdm_initial_w)
is_early_enough = _FALSE_;
}
}
/* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled */
if (is_early_enough == _TRUE_) {
class_call(thermodynamics_at_z(pba,
pth,
1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_normal,
&(ppw->last_index_thermo),
ppw->pvecback,
ppw->pvecthermo),
pth->error_message,
ppt->error_message);
if ((ppw->pvecback[pba->index_bg_a]*
ppw->pvecback[pba->index_bg_H]/
ppw->pvecthermo[pth->index_th_dkappa] >
ppr->start_small_k_at_tau_c_over_tau_h) ||
(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] >
ppr->start_large_k_at_tau_h_over_tau_k))
is_early_enough = _FALSE_;
}
if (is_early_enough == _TRUE_)
tau_lower = tau_mid;
else
tau_upper = tau_mid;
tau_mid = 0.5*(tau_lower + tau_upper);
}
tau = tau_mid;
/** - find the number of intervals over which approximation scheme is constant */
class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message);
ppw->inter_mode = pba->inter_normal;
class_call(perturb_find_approximation_number(ppr,
pba,
pth,
ppt,
index_md,
k,
ppw,
tau,
ppt->tau_sampling[tau_actual_size-1],
&interval_number,
interval_number_of),
ppt->error_message,
ppt->error_message);
class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message);
class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message);
for (index_interval=0; index_interval<interval_number; index_interval++)
class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message);
class_call(perturb_find_approximation_switches(ppr,
pba,
pth,
ppt,
index_md,
k,
ppw,
tau,
ppt->tau_sampling[tau_actual_size-1],
ppr->tol_tau_approx,
interval_number,
interval_number_of,
interval_limit,
interval_approx),
ppt->error_message,
ppt->error_message);
free(interval_number_of);
/** - fill the structure containing all fixed parameters, indices
and workspaces needed by perturb_derivs */
ppaw.ppr = ppr;
ppaw.pba = pba;
ppaw.pth = pth;
ppaw.ppt = ppt;
ppaw.index_md = index_md;
ppaw.index_ic = index_ic;
ppaw.index_k = index_k;
ppaw.k = k;
ppaw.ppw = ppw;
ppaw.ppw->inter_mode = pba->inter_closeby;
ppaw.ppw->last_index_back = 0;
ppaw.ppw->last_index_thermo = 0;
/** - check whether we need to print perturbations to a file for this wavenumber */
perhaps_print_variables = NULL;
ppw->index_ikout = -1;
for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){
if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){
ppw->index_ikout = index_ikout;
perhaps_print_variables = perturb_print_variables;
}
}
/** - loop over intervals over which approximation scheme is uniform. For each interval: */
for (index_interval=0; index_interval<interval_number; index_interval++) {
/** - --> (a) fix the approximation scheme */
for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
ppw->approx[index_ap]=interval_approx[index_interval][index_ap];
/** - --> (b) get the previous approximation scheme. If the current
interval starts from the initial time tau_ini, the previous
approximation is set to be a NULL pointer, so that the
function perturb_vector_init() knows that perturbations must
be initialized */
if (index_interval==0) {
previous_approx=NULL;
}
else {
previous_approx=interval_approx[index_interval-1];
}
/** - --> (c) define the vector of perturbations to be integrated
over. If the current interval starts from the initial time
tau_ini, fill the vector with initial conditions for each
mode. If it starts from an approximation switching point,
redistribute correctly the perturbations from the previous to
the new vector of perturbations. */
class_call(perturb_vector_init(ppr,
pba,
pth,
ppt,
index_md,
index_ic,
k,
interval_limit[index_interval],
ppw,
previous_approx),
ppt->error_message,
ppt->error_message);
/** - --> (d) integrate the perturbations over the current interval. */
if(ppr->evolver == rk){
generic_evolver = evolver_rk;
}
else{
generic_evolver = evolver_ndf15;
}
class_call(generic_evolver(perturb_derivs,
interval_limit[index_interval],
interval_limit[index_interval+1],
ppw->pv->y,
ppw->pv->used_in_sources,
ppw->pv->pt_size,
&ppaw,
ppr->tol_perturb_integration,
ppr->smallest_allowed_variation,
perturb_timescale,
ppr->perturb_integration_stepsize,
ppt->tau_sampling,
tau_actual_size,
perturb_sources,
perhaps_print_variables,
ppt->error_message),
ppt->error_message,
ppt->error_message);
}
/** - if perturbations were printed in a file, close the file */
//if (perhaps_print_variables != NULL)
// fclose(ppw->perturb_output_file);
/** - fill the source terms array with zeros for all times between
the last integrated time tau_max and tau_today. */
for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) {
for (index_tp = 0; index_tp < ppt->tp_size[index_md]; index_tp++) {
ppt->sources[index_md]
[index_ic * ppt->tp_size[index_md] + index_tp]
[index_tau * ppt->k_size[index_md] + index_k] = 0.;
}
}
/** - free quantities allocated at the beginning of the routine */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
for (index_interval=0; index_interval<interval_number; index_interval++)
free(interval_approx[index_interval]);
free(interval_approx);
free(interval_limit);
return _SUCCESS_;
}
/**
* Fill array of strings with the name of the 'k_output_values'
* functions (transfer functions as a function of time, for fixed
* values of k).
*
* @param pba Input: pointer to the background structure
* @param ppt Input/Output: pointer to the perturbation structure
* @return the error status
*/
int perturb_prepare_k_output(struct background * pba,
                             struct perturbs * ppt){

  int n_ncdm;
  /* Scratch buffer for per-species column titles. All writes are bounded
     with snprintf (the original used sprintf, which cannot guard against
     overflow of this fixed-size buffer). */
  char tmp[40];

  /* Start from empty title strings for every mode. */
  ppt->scalar_titles[0]='\0';
  ppt->vector_titles[0]='\0';
  ppt->tensor_titles[0]='\0';

  if (ppt->k_output_values_num > 0) {

    /** Write titles for all perturbations that we would like to print/store. */
    if (ppt->has_scalars == _TRUE_){

      class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"a",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_);
      /* Perturbed recombination */
      class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination);
      class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination);
      /* Ultrarelativistic species */
      class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur);
      class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur);
      /* Interacting dark radiation */
      class_store_columntitle(ppt->scalar_titles,"delta_idr",pba->has_idr);
      class_store_columntitle(ppt->scalar_titles,"theta_idr",pba->has_idr);
      /* shear_idr only exists when idr free-streams */
      if ((pba->has_idr == _TRUE_)&&(ppt->idr_nature == idr_free_streaming))
        class_store_columntitle(ppt->scalar_titles,"shear_idr",_TRUE_);
      /* Interacting dark matter */
      class_store_columntitle(ppt->scalar_titles,"delta_idm_dr",pba->has_idm_dr);
      class_store_columntitle(ppt->scalar_titles,"theta_idm_dr",pba->has_idm_dr);
      /* Cold dark matter */
      class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm);
      class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm);
      /* GDM_CLASS: added gdm fluid variables */
      class_store_columntitle(ppt->scalar_titles,"delta_gdm",pba->has_gdm);
      class_store_columntitle(ppt->scalar_titles,"theta_gdm",pba->has_gdm);
      class_store_columntitle(ppt->scalar_titles,"shear_gdm",pba->has_gdm);
      class_store_columntitle(ppt->scalar_titles,"pinad_gdm",pba->has_gdm);
      /* GDM_CLASS: added new sources */
      class_store_columntitle(ppt->scalar_titles,"temperC",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"ISW1C",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"ISW2C",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"dopplC",_TRUE_);
      class_store_columntitle(ppt->scalar_titles,"doppldotC",_TRUE_);
      /* Non-cold dark matter: one set of columns per ncdm species */
      if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          snprintf(tmp,sizeof(tmp),"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"cs2_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_);
        }
      }
      /* Decaying cold dark matter */
      class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm);
      class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm);
      /* Decay radiation */
      class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr);
      class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr);
      /* Scalar field scf */
      class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf);
      class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf);
      /** Fluid */
      class_store_columntitle(ppt->scalar_titles, "delta_rho_fld", pba->has_fld);
      class_store_columntitle(ppt->scalar_titles, "rho_plus_p_theta_fld", pba->has_fld);
      class_store_columntitle(ppt->scalar_titles, "delta_p_fld", pba->has_fld);

      ppt->number_of_scalar_titles =
        get_number_of_titles(ppt->scalar_titles);
    }

    if (ppt->has_tensors == _TRUE_){

      class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"a",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_);
      class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur);
      class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur);
      /* one set of tensor columns per ncdm species, when evolved */
      if (ppt->evolve_tensor_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          snprintf(tmp,sizeof(tmp),"delta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"theta_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
          snprintf(tmp,sizeof(tmp),"shear_ncdm[%d]",n_ncdm);
          class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_);
        }
      }

      ppt->number_of_tensor_titles =
        get_number_of_titles(ppt->tensor_titles);
    }
  }
  return _SUCCESS_;
}
/**
* For a given mode and wavenumber, find the number of intervals of
* time between tau_ini and tau_end such that the approximation
* scheme (and the number of perturbation equations) is uniform.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k                  Input: wavenumber (the double-valued wavenumber itself, not an index)
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param interval_number Output: total number of intervals
* @param interval_number_of Output: number of intervals with respect to each particular approximation
* @return the error status
*/
int perturb_find_approximation_number(
                                      struct precision * ppr,
                                      struct background * pba,
                                      struct thermo * pth,
                                      struct perturbs * ppt,
                                      int index_md,
                                      double k,
                                      struct perturb_workspace * ppw,
                                      double tau_ini,
                                      double tau_end,
                                      int * interval_number,
                                      int * interval_number_of /* interval_number_of[index_ap] (already allocated) */
                                      ){

  /** Summary: */

  /* index running over approximations */
  int index_ap;

  /* value of a given approximation at tau_ini and tau_end */
  int flag_ini,flag_end;

  /** - fix default number of intervals to one (if no approximation switch) */

  *interval_number=1;

  /** - evaluate all approximation flags at tau_ini and tau_end, then
      count the switches of each scheme. A single call to
      perturb_approximations() fills ppw->approx[] for ALL schemes at
      once, so two calls in total are enough (the previous version
      repeated both calls, with identical arguments, once per scheme). */

  if (ppw->ap_size > 0) {

    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_ini,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

    /* stash the initial flags in the (already allocated) output array;
       they are overwritten with the real output in the loop below */
    for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
      interval_number_of[index_ap] = ppw->approx[index_ap];

    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

    /** - for each approximation, add its number of switching times to
        the total interval count */
    for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

      flag_ini = interval_number_of[index_ap];
      flag_end = ppw->approx[index_ap];

      /* approximation labels must be declared in chronological order */
      class_test(flag_end<flag_ini,
                 ppt->error_message,
                 "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one");

      *interval_number += flag_end-flag_ini;

      interval_number_of[index_ap] = flag_end-flag_ini+1;
    }
  }

  return _SUCCESS_;
}
/**
* For a given mode and wavenumber, find the values of time at which
* the approximation changes.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k                  Input: wavenumber (the double-valued wavenumber itself, not an index)
* @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces
* @param tau_ini Input: initial time of the perturbation integration
* @param tau_end Input: final time of the perturbation integration
* @param precision Input: tolerance on output values
* @param interval_number Input: total number of intervals
* @param interval_number_of Input: number of intervals with respect to each particular approximation
* @param interval_limit Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end
* @param interval_approx Output: value of approximations in each interval
* @return the error status
*/
int perturb_find_approximation_switches(
                                        struct precision * ppr,
                                        struct background * pba,
                                        struct thermo * pth,
                                        struct perturbs * ppt,
                                        int index_md,
                                        double k,
                                        struct perturb_workspace * ppw,
                                        double tau_ini,
                                        double tau_end,
                                        double precision,
                                        int interval_number,
                                        int * interval_number_of,
                                        double * interval_limit, /* interval_limit[index_interval] (already allocated) */
                                        int ** interval_approx /* interval_approx[index_interval][index_ap] (already allocated) */
                                        ){

  /** Summary: locate, by bisection to tolerance 'precision', the times at
      which any approximation scheme switches, then store the sorted
      interval boundaries and the per-interval approximation flags. */

  /* index running over approximation schemes */
  int index_ap;
  /* index running over the switches of one given scheme */
  int index_switch;
  /* index running over all switches of all schemes together */
  int index_switch_tot;
  /* number of switches of the scheme currently considered */
  int num_switch;
  /* bisection bracket for the current switch */
  double tau_min,lower_bound,upper_bound;
  double mid=0;
  /* switching times in arbitrary (unsorted) order */
  double * unsorted_tau_switch;
  double next_tau_switch;
  /* flag value of the current scheme at the start of its bracket */
  int flag_ini;
  /* how many schemes switch simultaneously at a given boundary (must be exactly one) */
  int num_switching_at_given_time;

  /** - write in output arrays the initial time and approximation */

  interval_limit[0]=tau_ini;

  /* NOTE: perturb_approximations() fills ppw->approx[] for all schemes at
     the requested time; it is re-invoked below at each bisection midpoint */
  class_call(perturb_approximations(ppr,
                                    pba,
                                    pth,
                                    ppt,
                                    index_md,
                                    k,
                                    tau_ini,
                                    ppw),
             ppt->error_message,
             ppt->error_message);

  for (index_ap=0; index_ap<ppw->ap_size; index_ap++)
    interval_approx[0][index_ap]=ppw->approx[index_ap];

  /** - if there are no approximation switches, just write final time and return */

  if (interval_number == 1) {

    interval_limit[1]=tau_end;

  }

  /** - if there are switches, consider approximations one after each
      other. Find switching time by bisection. Store all switches in
      arbitrary order in array unsorted_tau_switch[ ] */

  else {

    class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message);

    index_switch_tot=0;

    for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {

      if (interval_number_of[index_ap] > 1) {

        num_switch = interval_number_of[index_ap]-1;

        /* successive switches of the same scheme are searched left to
           right: tau_min advances past each switch already found */
        tau_min = tau_ini;

        flag_ini = interval_approx[0][index_ap];

        for (index_switch=0; index_switch<num_switch; index_switch++) {

          lower_bound=tau_min;
          upper_bound=tau_end;
          mid = 0.5*(lower_bound+upper_bound);

          /* bisection: flags only increase with time, so the switch to
             value flag_ini+index_switch+1 is bracketed and the interval
             is halved until it is narrower than 'precision' */
          while (upper_bound - lower_bound > precision) {

            class_call(perturb_approximations(ppr,
                                              pba,
                                              pth,
                                              ppt,
                                              index_md,
                                              k,
                                              mid,
                                              ppw),
                       ppt->error_message,
                       ppt->error_message);

            if (ppw->approx[index_ap] > flag_ini+index_switch) {
              upper_bound=mid;
            }
            else {
              lower_bound=mid;
            }

            mid = 0.5*(lower_bound+upper_bound);

          }

          unsorted_tau_switch[index_switch_tot]=mid;
          index_switch_tot++;

          tau_min=mid;

        }
      }
    }

    /* sanity check: every expected switch must have been located */
    class_test(index_switch_tot != (interval_number-1),
               ppt->error_message,
               "bug in approximation switch search routine: should have %d = %d",
               index_switch_tot,interval_number-1);

    /** - now sort interval limits in correct order */

    /* selection-style sort: each pass picks the smallest unsorted switch
       time strictly greater than the previous boundary (fine here since
       the number of switches is small) */
    index_switch_tot=1;

    while (index_switch_tot < interval_number) {

      next_tau_switch=tau_end;
      for (index_switch=0; index_switch<interval_number-1; index_switch++) {

        if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) &&
            (unsorted_tau_switch[index_switch] < next_tau_switch)) {
          next_tau_switch=unsorted_tau_switch[index_switch];
        }
      }
      interval_limit[index_switch_tot]=next_tau_switch;
      index_switch_tot++;
    }

    interval_limit[index_switch_tot]=tau_end;

    class_test(index_switch_tot != interval_number,
               ppt->error_message,
               "most probably two approximation switching time were found to be equal, which cannot be handled\n");

    /** - store each approximation in chronological order */

    for (index_switch=1; index_switch<interval_number; index_switch++) {

      /* evaluate the flags at the midpoint of each interval, safely away
         from both boundaries */
      class_call(perturb_approximations(ppr,
                                        pba,
                                        pth,
                                        ppt,
                                        index_md,
                                        k,
                                        0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]),
                                        ppw),
                 ppt->error_message,
                 ppt->error_message);

      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        interval_approx[index_switch][index_ap]=ppw->approx[index_ap];

        /* check here that approximation does not go backward (remember
           that by definition the value of an approximation can only
           increase) */
        class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap],
                   ppt->error_message,
                   "The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n",
                   index_ap,
                   interval_approx[index_switch-1][index_ap],
                   interval_approx[index_switch][index_ap],
                   k,
                   0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]),
                   0.5*(interval_limit[index_switch]+interval_limit[index_switch+1])
                   );
      }

      /* check here that more than one approximation is not switched on at a given time */
      num_switching_at_given_time=0;
      for (index_ap=0; index_ap<ppw->ap_size; index_ap++) {
        if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap])
          num_switching_at_given_time++;
      }
      class_test(num_switching_at_given_time != 1,
                 ppt->error_message,
                 "for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n",
                 k,
                 interval_limit[index_switch],
                 num_switching_at_given_time);

      /* optional diagnostics: report each recognized switch on stdout */
      if (ppt->perturbations_verbose>2) {

        if (_scalars_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          //fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //TBC

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);

          if (pba->has_idr == _TRUE_){
            if ((interval_approx[index_switch-1][ppw->index_ap_rsa_idr]==(int)rsa_idr_off) &&
                (interval_approx[index_switch][ppw->index_ap_rsa_idr]==(int)rsa_idr_on))
              fprintf(stdout,"Mode k=%e: will switch on dark radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]);
          }

          if (pba->has_idm_dr == _TRUE_){
            if ((interval_approx[index_switch-1][ppw->index_ap_tca_idm_dr]==(int)tca_idm_dr_on) &&
                (interval_approx[index_switch][ppw->index_ap_tca_idm_dr]==(int)tca_idm_dr_off))
              fprintf(stdout,"Mode k=%e: will switch off dark tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]);
          }

          if (pba->has_ur == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
          if (pba->has_ncdm == _TRUE_) {
            if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) &&
                (interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) {
              fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]);
            }
          }
        }

        if (_tensors_) {

          if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) &&
              (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off))
            fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);

          if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) &&
              (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on))
            fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]);
        }
      }
    }

    free(unsorted_tau_switch);

    /* re-evaluate the flags at tau_end so that ppw leaves this routine in
       a state consistent with the end of the integration range (the last
       bisection above may have left ppw evaluated at an interior time) */
    class_call(perturb_approximations(ppr,
                                      pba,
                                      pth,
                                      ppt,
                                      index_md,
                                      k,
                                      tau_end,
                                      ppw),
               ppt->error_message,
               ppt->error_message);

  }

  return _SUCCESS_;
}
/**
* Initialize the field '-->pv' of a perturb_workspace structure, which
* is a perturb_vector structure. This structure contains indices and
* values of all quantities which need to be integrated with respect
* to time (and only them: quantities fixed analytically or obeying
* constraint equations are NOT included in this vector). This routine
* distinguishes between two cases:
*
* --> the input pa_old is set to the NULL pointer:
*
* This happens when we start integrating over a new wavenumber and we
* want to set initial conditions for the perturbations. Then, it is
* assumed that ppw-->pv is not yet allocated. This routine allocates
* it, defines all indices, and then fills the vector ppw-->pv-->y with
* the initial conditions defined in perturb_initial_conditions.
*
* --> the input pa_old is not set to the NULL pointer and describes
* some set of approximations:
*
* This happens when we need to change approximation scheme while
* integrating over a given wavenumber. The new approximation
* described by ppw-->pa is then different from pa_old. Then, this
* routine allocates a new vector with a new size and new index
* values; it fills this vector with initial conditions taken from the
* previous vector passed as an input in ppw-->pv, and eventually with
* some analytic approximations for the new variables appearing at
* this time; then the new vector comes in replacement of the old one,
* which is freed.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to the thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @param pa_old     Input: NULL if we need to set y to initial conditions for a new wavenumber; points towards a perturb_approximations if we want to switch the approximation scheme.
* @return the error status
*/
int perturb_vector_init(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */
int * pa_old
) {
/** Summary: */
/** - define local variables */
struct perturb_vector * ppv;
int index_pt;
int l;
int n_ncdm,index_q,ncdm_l_size;
double rho_plus_p_ncdm,q,q2,epsilon,a,factor;
/** - allocate a new perturb_vector structure to which ppw-->pv will point at the end of the routine */
class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message);
/** - initialize pointers to NULL (they will be allocated later if
needed), relevant for perturb_vector_free() */
ppv->l_max_ncdm = NULL;
ppv->q_size_ncdm = NULL;
/** - define all indices in this new vector (depends on approximation scheme, described by the input structure ppw-->pa) */
index_pt = 0;
if (_scalars_) {
/* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */
class_test(ppr->l_max_g < 4,
ppt->error_message,
"ppr->l_max_g should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third and fourth momentum");
/* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g < 4,
ppt->error_message,
"ppr->l_max_pol_g should be at least 4");
/* reject inconsistent values of the number of mutipoles in decay radiation hierarchy */
if (pba->has_dr == _TRUE_) {
class_test(ppr->l_max_dr < 4,
ppt->error_message,
"ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
/* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */
if (pba->has_ur == _TRUE_) {
class_test(ppr->l_max_ur < 4,
ppt->error_message,
"ppr->l_max_ur should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum");
}
if (pba->has_idr == _TRUE_){
class_test(((ppr->l_max_idr < 4)&&(ppt->idr_nature == idr_free_streaming)),
ppt->error_message,
"ppr->l_max_idr should be at least 4, i.e. we must integrate at least over interacting dark radiation density, velocity, shear, third and fourth momentum");
}
/* photons */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
/* temperature */
ppv->l_max_g = ppr->l_max_g;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */
/* polarization */
ppv->l_max_pol_g = ppr->l_max_pol_g;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2);
}
}
/* baryons */
class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */
/* cdm */
class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */
class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */
/* GDM_CLASS: gdm */
class_define_index(ppv->index_pt_delta_gdm,pba->has_gdm,index_pt,1); /* gdm density */
class_define_index(ppv->index_pt_theta_gdm,pba->has_gdm,index_pt,1); /* gdm velocity */
if (ppt->dynamic_shear_gdm == _TRUE_) {
class_define_index(ppv->index_pt_shear_gdm,pba->has_gdm,index_pt,1); /* dynamic gdm shear */
}
/* idm_dr */
class_define_index(ppv->index_pt_delta_idm_dr,pba->has_idm_dr,index_pt,1); /* idm_dr density */
class_define_index(ppv->index_pt_theta_idm_dr,pba->has_idm_dr,index_pt,1); /* idm_dr velocity */
/* dcdm */
class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */
class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */
/* ultra relativistic decay radiation */
if (pba->has_dr==_TRUE_){
ppv->l_max_dr = ppr->l_max_dr;
class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */
}
/* fluid */
if (pba->use_ppf == _FALSE_) {
class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */
class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */
}
else {
class_define_index(ppv->index_pt_Gamma_fld,pba->has_fld,index_pt,1); /* Gamma variable of PPF scheme */
}
/* scalar field */
class_define_index(ppv->index_pt_phi_scf,pba->has_scf,index_pt,1); /* scalar field density */
class_define_index(ppv->index_pt_phi_prime_scf,pba->has_scf,index_pt,1); /* scalar field velocity */
/* perturbed recombination: the indices are defined once tca is off. */
if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1);
class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1);
}
/* ultra relativistic neutrinos */
if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */
class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
}
}
/* interacting dark radiation */
if (pba->has_idr == _TRUE_){
if(ppw->approx[ppw->index_ap_rsa_idr]==(int)rsa_idr_off) {
class_define_index(ppv->index_pt_delta_idr,_TRUE_,index_pt,1); /* density of interacting dark radiation */
class_define_index(ppv->index_pt_theta_idr,_TRUE_,index_pt,1); /* velocity of interacting dark radiation */
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
class_define_index(ppv->index_pt_shear_idr,_TRUE_,index_pt,1); /* shear of interacting dark radiation */
ppv->l_max_idr = ppr->l_max_idr;
class_define_index(ppv->index_pt_l3_idr,_TRUE_,index_pt,ppv->l_max_idr-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
}
}
}
}
/* non-cold dark matter */
if (pba->has_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){
/* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
}
else{
// In the fluid approximation, hierarchy is cut at lmax = 2 and q dependence is integrated out:
ppv->l_max_ncdm[n_ncdm] = 2;
ppv->q_size_ncdm[n_ncdm] = 1;
}
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/* metric (only quantities to be integrated, not those obeying constraint equations) */
/* metric perturbation eta of synchronous gauge */
class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1);
/* metric perturbation phi of newtonian gauge ( we could fix it
using Einstein equations as a constraint equation for phi, but
integration is numerically more stable if we actually evolve
phi) */
class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1);
}
if (_vectors_) {
/* Vector baryon velocity: v_b^{(1)}. */
class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1);
/* eventually reject inconsistent values of the number of mutipoles in photon temperature hierarchy and polarization*/
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/** - (a) metric perturbations V or \f$ h_v \f$ depending on gauge */
if (ppt->gauge == synchronous){
class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1);
}
if (ppt->gauge == newtonian){
class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1);
}
}
if (_tensors_) {
/* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */
class_test(ppr->l_max_g_ten < 4,
ppt->error_message,
"ppr->l_max_g_ten should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third momentum");
/* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */
class_test(ppr->l_max_pol_g_ten < 4,
ppt->error_message,
"ppr->l_max_pol_g_ten should be at least 4");
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppv->l_max_g = ppr->l_max_g_ten;
class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */
class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */
class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */
class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */
ppv->l_max_pol_g = ppr->l_max_pol_g_ten;
class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */
class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */
class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */
class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */
}
}
/* ultra relativistic neutrinos */
class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */
class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */
class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */
ppv->l_max_ur = ppr->l_max_ur;
class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */
if (ppt->evolve_tensor_ncdm == _TRUE_) {
ppv->index_pt_psi0_ncdm1 = index_pt;
ppv->N_ncdm = pba->N_ncdm;
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message);
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
// Set value of ppv->l_max_ncdm:
class_test(ppr->l_max_ncdm < 4,
ppt->error_message,
"ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm);
//Copy value from precision parameter:
ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm;
ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm];
index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm];
}
}
/** - (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included
in the vector of ordinary perturbations, not in that of metric perturbations */
class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */
class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */
}
ppv->pt_size = index_pt;
/** - allocate vectors for storing the values of all these
quantities and their time-derivatives at a given time */
class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message);
class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message);
class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message);
/** - specify which perturbations are needed in the evaluation of source terms */
/* take all of them by default */
for (index_pt=0; index_pt<ppv->pt_size; index_pt++)
ppv->used_in_sources[index_pt] = _TRUE_;
/* indicate which ones are not needed (this is just for saving time,
omitting perturbations in this list will not change the
results!) */
if (_scalars_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles above l=2 (but they are
defined only when rsa and tca are off) */
for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* for polarization, we only need l=0,2 (but l =1,3, ... are
defined only when rsa and tca are off) */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/* we don't need ur multipoles above l=2 (but they are
defined only when rsa and ufa are off) */
for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
}
if (pba->has_idr == _TRUE_) {
/* we don't need interacting dark radiation multipoles
above l=2 (but they are defined only when rsa_idr
and tca_idm_dr are off) */
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off){
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
for (index_pt=ppv->index_pt_l3_idr; index_pt <= ppv->index_pt_delta_idr+ppv->l_max_idr; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
}
}
if (pba->has_ncdm == _TRUE_) {
/* we don't need ncdm multipoles above l=2 (but they are
defined only when ncdmfa is off) */
index_pt = ppv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
if (l>2) ppv->used_in_sources[index_pt]=_FALSE_;
index_pt++;
}
}
}
}
}
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* we don't need temperature multipoles except l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_;
for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
/* same for polarization, we only need l=0,2,4 */
ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_;
ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_;
for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++)
ppv->used_in_sources[index_pt]=_FALSE_;
}
}
/* we need h' but not h */
ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_;
}
/** - case of setting initial conditions for a new wavenumber */
if (pa_old == NULL) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau);
if (_scalars_) {
/** - --> (a) check that current approximation scheme is consistent
with initial conditions */
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"scalar initial conditions assume radiation streaming approximation turned off");
if (pba->has_idr == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_on,
ppt->error_message,
"scalar initial conditions assume dark radiation approximation turned off");
}
/* we do not need to do a check for tca_idm_dr, as the initial conditions are consistent with any tca_idm_dr */
if (pba->has_ur == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on,
ppt->error_message,
"scalar initial conditions assume ur fluid approximation turned off");
}
if (pba->has_ncdm == _TRUE_) {
class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on,
ppt->error_message,
"scalar initial conditions assume ncdm fluid approximation turned off");
}
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"scalar initial conditions assume tight-coupling approximation turned on");
}
if (_tensors_) {
class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off,
ppt->error_message,
"tensor initial conditions assume tight-coupling approximation turned on");
class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on,
ppt->error_message,
"tensor initial conditions assume radiation streaming approximation turned off");
}
/** - --> (b) let ppw-->pv point towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
/** - --> (c) fill the vector ppw-->pv-->y with appropriate initial conditions */
class_call(perturb_initial_conditions(ppr,
pba,
ppt,
index_md,
index_ic,
k,
tau,
ppw),
ppt->error_message,
ppt->error_message);
}
/** - case of switching approximation while a wavenumber is being integrated */
else {
/** - --> (a) for the scalar mode: */
if (_scalars_) {
/** - ---> (a.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
if (pba->has_idm_dr == _TRUE_){
class_test((pa_old[ppw->index_ap_tca] == (int)tca_idm_dr_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_idm_dr_on),
ppt->error_message,
"at tau=%g: the dark tight-coupling approximation can be switched off, not on",tau);
}
/** - ---> (a.2.) some variables (b, cdm, fld, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_delta_b] =
ppw->pv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
if (pba->has_cdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_cdm] =
ppw->pv->y[ppw->pv->index_pt_delta_cdm];
if (ppt->gauge == newtonian) {
ppv->y[ppv->index_pt_theta_cdm] =
ppw->pv->y[ppw->pv->index_pt_theta_cdm];
}
}
/* GDM_CLASS */
if (pba->has_gdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_gdm] =
ppw->pv->y[ppw->pv->index_pt_delta_gdm];
ppv->y[ppv->index_pt_theta_gdm] =
ppw->pv->y[ppw->pv->index_pt_theta_gdm];
if (ppt->dynamic_shear_gdm == _TRUE_) {
ppv->y[ppv->index_pt_shear_gdm] =
ppw->pv->y[ppw->pv->index_pt_shear_gdm];
}
}
/* END GDM_CLASS */
if (pba->has_idm_dr == _TRUE_) {
ppv->y[ppv->index_pt_delta_idm_dr] =
ppw->pv->y[ppw->pv->index_pt_delta_idm_dr];
ppv->y[ppv->index_pt_theta_idm_dr] =
ppw->pv->y[ppw->pv->index_pt_theta_idm_dr];
}
if (pba->has_dcdm == _TRUE_) {
ppv->y[ppv->index_pt_delta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
ppv->y[ppv->index_pt_theta_dcdm] =
ppw->pv->y[ppw->pv->index_pt_theta_dcdm];
}
if (pba->has_dr == _TRUE_){
for (l=0; l <= ppv->l_max_dr; l++)
ppv->y[ppv->index_pt_F0_dr+l] =
ppw->pv->y[ppw->pv->index_pt_F0_dr+l];
}
if (pba->has_fld == _TRUE_) {
if (pba->use_ppf == _FALSE_) {
ppv->y[ppv->index_pt_delta_fld] =
ppw->pv->y[ppw->pv->index_pt_delta_fld];
ppv->y[ppv->index_pt_theta_fld] =
ppw->pv->y[ppw->pv->index_pt_theta_fld];
}
else {
ppv->y[ppv->index_pt_Gamma_fld] =
ppw->pv->y[ppw->pv->index_pt_Gamma_fld];
}
}
if (pba->has_scf == _TRUE_) {
ppv->y[ppv->index_pt_phi_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_scf];
ppv->y[ppv->index_pt_phi_prime_scf] =
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf];
}
if (ppt->gauge == synchronous)
ppv->y[ppv->index_pt_eta] =
ppw->pv->y[ppw->pv->index_pt_eta];
if (ppt->gauge == newtonian)
ppv->y[ppv->index_pt_phi] =
ppw->pv->y[ppw->pv->index_pt_phi];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
/* tight-coupling approximation for shear_g (previously
computed in perturb_derivs: perturb_derivs is always
called at the end of generic_evolver, in order to update
all quantities in ppw to the time at which the
approximation is switched off) */
ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g;
ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */
ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */
ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */
ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */
ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */
if (pba->has_ur == _TRUE_) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr]==(int)rsa_idr_off){
ppv->y[ppv->index_pt_delta_idr] =
ppw->pv->y[ppw->pv->index_pt_delta_idr];
ppv->y[ppv->index_pt_theta_idr] =
ppw->pv->y[ppw->pv->index_pt_theta_idr];
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
ppv->y[ppv->index_pt_shear_idr] =
ppw->pv->y[ppw->pv->index_pt_shear_idr];
ppv->y[ppv->index_pt_l3_idr] =
ppw->pv->y[ppw->pv->index_pt_l3_idr];
for (l=4; l <= ppv->l_max_idr; l++)
ppv->y[ppv->index_pt_delta_idr+l] =
ppw->pv->y[ppw->pv->index_pt_delta_idr+l];
}
}
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* perturbed recombination */
/* the initial conditions are set when tca is switched off (current block) */
if (ppt->has_perturbed_recombination == _TRUE_){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.;
}
} // end of block tca ON -> tca OFF
/* perturbed recombination */
/* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){
ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =
ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr]==(int)rsa_idr_off){
ppv->y[ppv->index_pt_delta_idr] =
ppw->pv->y[ppw->pv->index_pt_delta_idr];
ppv->y[ppv->index_pt_theta_idr] =
ppw->pv->y[ppw->pv->index_pt_theta_idr];
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
ppv->y[ppv->index_pt_shear_idr] =
ppw->pv->y[ppw->pv->index_pt_shear_idr];
ppv->y[ppv->index_pt_l3_idr] =
ppw->pv->y[ppw->pv->index_pt_l3_idr];
for (l=4; l <= ppv->l_max_idr; l++)
ppv->y[ppv->index_pt_delta_idr+l] =
ppw->pv->y[ppw->pv->index_pt_delta_idr+l];
}
}
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
/* -- case of switching on ur fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ur == _TRUE_) {
if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
}
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr]==(int)rsa_idr_off){
ppv->y[ppv->index_pt_delta_idr] =
ppw->pv->y[ppw->pv->index_pt_delta_idr];
ppv->y[ppv->index_pt_theta_idr] =
ppw->pv->y[ppw->pv->index_pt_theta_idr];
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
ppv->y[ppv->index_pt_shear_idr] =
ppw->pv->y[ppw->pv->index_pt_shear_idr];
ppv->y[ppv->index_pt_l3_idr] =
ppw->pv->y[ppw->pv->index_pt_l3_idr];
for (l=4; l <= ppv->l_max_idr; l++)
ppv->y[ppv->index_pt_delta_idr+l] =
ppw->pv->y[ppw->pv->index_pt_delta_idr+l];
}
}
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
/* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and
ppv->q_size_ncdm is updated.*/
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
}
/* Case of switching on rsa for interacting dark radiation */
if (pba->has_idr == _TRUE_) {
if ((pa_old[ppw->index_ap_rsa_idr] == (int)rsa_idr_off) && (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on dark radiation approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
/* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and
ppv->q_size_ncdm is updated.*/
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
}
if (pba->has_idm_dr == _TRUE_) {
/* Case of switching off interacting dark radiation tight coupling approximation */
if ((pa_old[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_on) && (ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off dark tight coupling approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_idr_off) {
ppv->y[ppv->index_pt_delta_idr] =
ppw->pv->y[ppw->pv->index_pt_delta_idr];
ppv->y[ppv->index_pt_theta_idr] =
ppw->pv->y[ppw->pv->index_pt_theta_idr];
/* idr is always free streaming if tca_idm_dr is on */
if (ppt->idr_nature == idr_free_streaming){
ppv->y[ppv->index_pt_shear_idr] = ppw->tca_shear_idm_dr;
ppv->y[ppv->index_pt_l3_idr] = 6./7.*k*ppv->y[ppv->index_pt_shear_idr]/ppw->pvecthermo[pth->index_th_dmu_idm_dr]/ppt->alpha_idm_dr[1];
}
}
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
}
if (pba->has_ncdm == _TRUE_) {
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){
/* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and
ppv->q_size_ncdm is updated.*/
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
}
}
/* -- case of switching on ncdm fluid
approximation. Provide correct initial conditions to new set
of variables */
if (pba->has_ncdm == _TRUE_) {
if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau);
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_g] =
ppw->pv->y[ppw->pv->index_pt_delta_g];
ppv->y[ppv->index_pt_theta_g] =
ppw->pv->y[ppw->pv->index_pt_theta_g];
}
if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) {
ppv->y[ppv->index_pt_shear_g] =
ppw->pv->y[ppw->pv->index_pt_shear_g];
ppv->y[ppv->index_pt_l3_g] =
ppw->pv->y[ppw->pv->index_pt_l3_g];
for (l = 4; l <= ppw->pv->l_max_g; l++) {
ppv->y[ppv->index_pt_delta_g+l] =
ppw->pv->y[ppw->pv->index_pt_delta_g+l];
}
ppv->y[ppv->index_pt_pol0_g] =
ppw->pv->y[ppw->pv->index_pt_pol0_g];
ppv->y[ppv->index_pt_pol1_g] =
ppw->pv->y[ppw->pv->index_pt_pol1_g];
ppv->y[ppv->index_pt_pol2_g] =
ppw->pv->y[ppw->pv->index_pt_pol2_g];
ppv->y[ppv->index_pt_pol3_g] =
ppw->pv->y[ppw->pv->index_pt_pol3_g];
for (l = 4; l <= ppw->pv->l_max_pol_g; l++) {
ppv->y[ppv->index_pt_pol0_g+l] =
ppw->pv->y[ppw->pv->index_pt_pol0_g+l];
}
}
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
}
}
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off){
ppv->y[ppv->index_pt_delta_idr] =
ppw->pv->y[ppw->pv->index_pt_delta_idr];
ppv->y[ppv->index_pt_theta_idr] =
ppw->pv->y[ppw->pv->index_pt_theta_idr];
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
ppv->y[ppv->index_pt_shear_idr] =
ppw->pv->y[ppw->pv->index_pt_shear_idr];
ppv->y[ppv->index_pt_l3_idr] =
ppw->pv->y[ppw->pv->index_pt_l3_idr];
for (l=4; l <= ppv->l_max_idr; l++)
ppv->y[ppv->index_pt_delta_idr+l] =
ppw->pv->y[ppw->pv->index_pt_delta_idr+l];
}
}
}
}
a = ppw->pvecback[pba->index_bg_a];
index_pt = ppw->pv->index_pt_psi0_ncdm1;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
// We are in the fluid approximation, so ncdm_l_size is always 3.
ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1;
rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+
ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
for(l=0; l<=2; l++){
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0;
}
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){
// Integrate over distributions:
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] +=
pba->w_ncdm[n_ncdm][index_q]*q2*epsilon*
ppw->pv->y[index_pt];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q*
ppw->pv->y[index_pt+1];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] +=
pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon*
ppw->pv->y[index_pt+2];
//Jump to next momentum bin in ppw->pv->y:
index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm;
ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] *=2.0/3.0*factor/rho_plus_p_ncdm;
}
}
}
}
/** - --> (b) for the vector mode */
if (_vectors_) {
/** - ---> (b.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** - ---> (b.2.) some variables (gw, gwdot, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
if (ppt->gauge == synchronous){
ppv->y[ppv->index_pt_hv_prime] =
ppw->pv->y[ppw->pv->index_pt_hv_prime];
}
if (ppt->gauge == newtonian){
ppv->y[ppv->index_pt_V] =
ppw->pv->y[ppw->pv->index_pt_V];
}
ppv->y[ppv->index_pt_theta_b] =
ppw->pv->y[ppw->pv->index_pt_theta_b];
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC
//-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC
//1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** - --> (c) for the tensor mode */
if (_tensors_) {
/** - ---> (c.1.) check that the change of approximation scheme makes
sense (note: before calling this routine there is already a
check that we wish to change only one approximation flag at
a time) */
class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on),
ppt->error_message,
"at tau=%g: the tight-coupling approximation can be switched off, not on",tau);
/** - ---> (c.2.) some variables (gw, gwdot, ...) are not affected by
any approximation. They need to be reconducted whatever
the approximation switching is. We treat them here. Below
we will treat other variables case by case. */
ppv->y[ppv->index_pt_gw] =
ppw->pv->y[ppw->pv->index_pt_gw];
ppv->y[ppv->index_pt_gwdot] =
ppw->pv->y[ppw->pv->index_pt_gwdot];
if (ppt->evolve_tensor_ur == _TRUE_){
/* For now, neutrinos go here. */
ppv->y[ppv->index_pt_delta_ur] =
ppw->pv->y[ppw->pv->index_pt_delta_ur];
ppv->y[ppv->index_pt_theta_ur] =
ppw->pv->y[ppw->pv->index_pt_theta_ur];
ppv->y[ppv->index_pt_shear_ur] =
ppw->pv->y[ppw->pv->index_pt_shear_ur];
ppv->y[ppv->index_pt_l3_ur] =
ppw->pv->y[ppw->pv->index_pt_l3_ur];
for (l=4; l <= ppv->l_max_ur; l++)
ppv->y[ppv->index_pt_delta_ur+l] =
ppw->pv->y[ppw->pv->index_pt_delta_ur+l];
}
if (ppt->evolve_tensor_ncdm == _TRUE_){
index_pt = 0;
for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){
for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){
for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){
// This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly.
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] =
ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt];
index_pt++;
}
}
}
}
/* -- case of switching off tight coupling
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau);
ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa];
}
/* -- case of switching on radiation streaming
approximation. Provide correct initial conditions to new set
of variables */
if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) {
if (ppt->perturbations_verbose>2)
fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]);
}
}
/** - --> (d) free the previous vector of perturbations */
class_call(perturb_vector_free(ppw->pv),
ppt->error_message,
ppt->error_message);
/** - --> (e) let ppw-->pv point towards the perturb_vector structure
that we just created */
ppw->pv = ppv;
}
return _SUCCESS_;
}
/**
* Free the perturb_vector structure.
*
* @param pv Input: pointer to perturb_vector structure to be freed
* @return the error status
*/
int perturb_vector_free(
                        struct perturb_vector * pv
                        ) {

  /* The ncdm index arrays are only allocated when non-cold dark matter
     species are present; they must be NULL otherwise. Since free(NULL)
     is a well-defined no-op (ISO C 7.22.3.3), no explicit NULL guard
     is needed before freeing them. */
  free(pv->l_max_ncdm);
  free(pv->q_size_ncdm);

  /* vectors of perturbations, their derivatives, and the source-term
     usage flags are always allocated together with the structure */
  free(pv->y);
  free(pv->dy);
  free(pv->used_in_sources);

  /* finally release the structure itself */
  free(pv);

  return _SUCCESS_;
}
/**
* For each mode, wavenumber and initial condition, this function
* initializes in the vector all values of perturbed variables (in a
* given gauge). It is assumed here that all values have previously been
* set to zero, only non-zero values are set here.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param index_ic Input: index of initial condition under consideration (ad, iso...)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y.
* @return the error status
*/
int perturb_initial_conditions(struct precision * ppr,
struct background * pba,
struct perturbs * ppt,
int index_md,
int index_ic,
double k,
double tau,
struct perturb_workspace * ppw
) {
/** Summary: */
/** --> Declare local variables */
double a,a_prime_over_a;
double w_fld,dw_over_da_fld,integral_fld;
double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime;
double delta_gdm=0.,theta_gdm=0.,w_gdm=0,cs2_gdm=0.,ca2_gdm=0.,cv2_gdm=0.; // GDM_CLASS
double delta_dr=0;
double q,epsilon,k2;
int index_q,n_ncdm,idx;
double rho_r,rho_m,rho_nu,rho_m_over_rho_r;
double fracnu,fracg,fracb,fraccdm,om;
double fracgdm,omk,RnuTerm,RnuAltTerm,omtau,csTerm1,csTerm4; // GDM_CLASS
double ktau_two,ktau_three;
double f_dr;
double delta_tot;
double velocity_tot;
double s2_squared;
/** --> For scalars */
if (_scalars_) {
/** - (a) compute relevant background quantities: compute rho_r,
rho_m, rho_nu (= all relativistic except photons), and their
ratio. */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_normal,
&(ppw->last_index_back),
ppw->pvecback),
pba->error_message,
ppt->error_message);
a = ppw->pvecback[pba->index_bg_a];
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
/* 8piG/3 rho_r(t_i) */
rho_r = ppw->pvecback[pba->index_bg_rho_g];
/* 8piG/3 rho_m(t_i) */
rho_m = ppw->pvecback[pba->index_bg_rho_b];
/* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */
rho_nu = 0.;
if (pba->has_cdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
/* GDM_CLASS
/* this has implications for how the Einstein equations are written using the
radiation/matter ratio rho_m_over_rho_r. The fld is supposed to be close to
cdm regarding the background: |w| << 1 */
if (pba->has_gdm == _TRUE_) {
/* This next test ensures that integration starts early enough for the
simplified GDM initial conditions to be valid */
class_test(ppr->start_small_k_at_tau_c_over_tau_h > 1e-6,
ppt->error_message,
"The precision parameter 'start_small_k_at_tau_c_over_tau_h' is too high (= %e) for GDM, use a value <= 1e-6.");
rho_m += ppw->pvecback[pba->index_bg_rho_gdm];
}
/* END GDM_CLASS */
if (pba->has_idm_dr == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_idm_dr];
}
if (pba->has_dcdm == _TRUE_) {
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if (pba->has_dr == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_dr];
rho_nu += ppw->pvecback[pba->index_bg_rho_dr];
}
if (pba->has_ur == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_ur];
rho_nu += ppw->pvecback[pba->index_bg_rho_ur];
}
if (pba->has_idr == _TRUE_) {
rho_r += ppw->pvecback[pba->index_bg_rho_idr];
rho_nu += ppw->pvecback[pba->index_bg_rho_idr];
}
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){
rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm];
}
}
class_test(rho_r == 0.,
ppt->error_message,
"stop to avoid division by zero");
/* f_nu = Omega_nu(t_i) / Omega_r(t_i) */
fracnu = rho_nu/rho_r;
/* f_g = Omega_g(t_i) / Omega_r(t_i) */
fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r;
/* f_b = Omega_b(t_i) / Omega_m(t_i) */
fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m;
/* GDM_CLASS */
/* initial conditions need to be modified in case of initially time varying w.
Here we assume that w_ini=const and ca2_ini=w_ini.
Although w,ca2,cs2,cv2 are now in principle scale and time-dependent, the
initial conditions still require all of them to be time-independent. Check this
only via w==ca2. It is allowed that both cs2 and cv2 depend on scale (no
modifications if IC required). */
fracgdm = 0;
if (pba->has_gdm == _TRUE_) {
fracgdm = ppw->pvecback[pba->index_bg_rho_gdm]/rho_m;
w_gdm = ppw->pvecback[pba->index_bg_w_gdm];
ca2_gdm = ppw->pvecback[pba->index_bg_ca2_gdm];
class_test(w_gdm != ca2_gdm,
ppt->error_message,
"Stopped because w is not equal to ca2 initially, which is required by the GDM initial conditions.");
cs2_gdm = cs2_gdm_of_a_and_k(pba,a,k, ppw);
cv2_gdm = cv2_gdm_of_a_and_k(pba,a,k);
}
/* some other shortcut notations */
csTerm4 = 4. + 3.*cs2_gdm - 6.*w_gdm;
csTerm1 = 1. + 2.*cs2_gdm - 3.*w_gdm;
RnuTerm = 15. + 4.*fracnu;
RnuAltTerm = 5. + 4.*fracnu;
omtau = om*tau;
omk= om/k;
/* END GDM_CLASS */
/* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */
// fraccdm = 1.-fracb; // GDM_CLASS
fraccdm = 1.-fracb-fracgdm; // GDM_CLASS
/* Omega_m(t_i) / Omega_r(t_i) */
rho_m_over_rho_r = rho_m/rho_r;
/* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i))
= Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4
= (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1
This (a priori strange) parameter is the relevant one for expressing a
as a function of tau during radiation and matter domination (but not DE domination).
Indeed the exact solution of Friedmann when there is only radiation and matter in
the universe is
a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega]
*/
om = a*rho_m/sqrt(rho_r);
/* (k tau)^2, (k tau)^3 */
ktau_two=k*k*tau*tau;
ktau_three=k*tau*ktau_two;
/* curvature-dependent factors */
s2_squared = 1.-3.*pba->K/k/k;
/** - (b) starts by setting everything in synchronous gauge. If
another gauge is needed, we will perform a gauge
transformation below. */
/** - --> (b.1.) adiabatic */
if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
/* The following formulas are valid at leading order in
(k*tau) and (om*tau), and order zero in
tight-coupling. Identical to first order terms in CRS,
except for normalization (when ppr->curvature_ini=1, tau=1:
leads to factor 1/2 difference between CRS formulas with
beta1=0). Identical to CAMB when om set to zero in theta_g,
theta_ur, shear_ur, tau
In the non-flat case the relation R=eta is still valid
outside the horizon for adiabatic IC. Hence eta is still
set to ppr->curvature_ini at leading order. Factors s2
appear through the solution of Einstein equations and
equations of motion. */
/* GDM_CLASS: spatial curvature s2_squared terms are not included when GDM is
requested, because they should be irrelevant at tau_ini ~ 0.001 for any sensible
omega_k. All the terms still in there are left overs from the original class.
Checked to be exactly 1. */
/* photon density */
if (pba->has_gdm == _TRUE_) { // GDM_CLASS
ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3.
* ppr->curvature_ini * s2_squared;
}
else {
ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.)
* ppr->curvature_ini * s2_squared;
}
/* photon velocity */
if (pba->has_gdm == _TRUE_) { // GDM_CLASS
ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36.
* ppr->curvature_ini * s2_squared;
}
else {
ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. * (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau)
* ppr->curvature_ini * s2_squared;
}
/* tighly-coupled baryons */
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */
/* cdm velocity vanishes in the synchronous gauge */
}
/* GDM_CLASS
Here is C=1, compare 3.7 of 1004.5509, (1.-om*tau/5.) includes a correction
factor in a matter radiation universe. But we can make sure that
omega tau << 1 . */
if (pba->has_gdm == _TRUE_) {
/* initial conditions need to be modified in case of time varying w*/
ppw->pv->y[ppw->pv->index_pt_delta_gdm] = (-(4.-3.*cs2_gdm)*(1.+w_gdm)/4./
csTerm4 + 12.*cv2_gdm*(cs2_gdm-w_gdm)/csTerm4/RnuTerm)
*ktau_two* ppr->curvature_ini;
ppw->pv->y[ppw->pv->index_pt_theta_gdm] = -(cs2_gdm/4./csTerm4 + 4.*cv2_gdm*(2.+3.
*(cs2_gdm-w_gdm))/3./(1.+w_gdm)/csTerm4/RnuTerm)
*ktau_three*k* ppr->curvature_ini ;
if (ppt->dynamic_shear_gdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_shear_gdm] = 8./3.*cv2_gdm/(1.+w_gdm)/RnuTerm*ktau_two*ppr->curvature_ini; /*from Hu's GDM paper, we only need initial conditions in case the fluid shear is dynamical */
}
}
/* END GDM_CLASS
/* interacting dark matter */
if (pba->has_idm_dr == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_idm_dr] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* idm_dr density */
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */
/* dcdm velocity velocity vanishes initially in the synchronous gauge */
}
/* fluid (assumes wa=0, if this is not the case the
fluid will catch anyway the attractor solution) */
if (pba->has_fld == _TRUE_) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
if (pba->use_ppf == _FALSE_) {
ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+w_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*w_fld+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature
ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*w_fld+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature
}
/* if use_ppf == _TRUE_, y[ppw->pv->index_pt_Gamma_fld] will be automatically set to zero, and this is what we want (although one could probably work out some small nonzero initial conditions: TODO) */
}
if (pba->has_scf == _TRUE_) {
/** - ---> Canonical field (solving for the perturbations):
* initial perturbations set to zero, they should reach the attractor soon enough.
* - ---> TODO: Incorporate the attractor IC from 1004.5509.
* delta_phi \f$ = -(a/k)^2/\phi'(\rho + p)\theta \f$,
* delta_phi_prime \f$ = a^2/\phi' \f$ (delta_rho_phi + V'delta_phi),
* and assume theta, delta_rho as for perfect fluid
* with \f$ c_s^2 = 1 \f$ and w = 1/3 (ASSUMES radiation TRACKING)
*/
ppw->pv->y[ppw->pv->index_pt_phi_scf] = 0.;
/* a*a/k/k/ppw->pvecback[pba->index_bg_phi_prime_scf]*k*ktau_three/4.*1./(4.-6.*(1./3.)+3.*1.) * (ppw->pvecback[pba->index_bg_rho_scf] + ppw->pvecback[pba->index_bg_p_scf])* ppr->curvature_ini * s2_squared; */
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] = 0.;
/* delta_fld expression * rho_scf with the w = 1/3, c_s = 1
a*a/ppw->pvecback[pba->index_bg_phi_prime_scf]*( - ktau_two/4.*(1.+1./3.)*(4.-3.*1.)/(4.-6.*(1/3.)+3.*1.)*ppw->pvecback[pba->index_bg_rho_scf] - ppw->pvecback[pba->index_bg_dV_scf]*ppw->pv->y[ppw->pv->index_pt_phi_scf])* ppr->curvature_ini * s2_squared; */
}
/* all relativistic relics: ur, early ncdm, dr */
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_) || (pba->has_idr == _TRUE_)) {
/* GDM_CLASS: removed the omega*tau terms and l3_ur if GDM requested */
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */
/* velocity of ultra-relativistic neutrinos/relics */ //TBC
if (pba->has_gdm == _TRUE_) { // GDM_CLASS
theta_ur = -(23.+4*fracnu)/36./RnuTerm*ktau_three*k * ppr->curvature_ini ;
shear_ur = 2./3./RnuTerm*ktau_two * ppr->curvature_ini;
l3_ur = 0.;
}
else {
theta_ur = - k*ktau_three/36./(4.*fracnu+15.) * (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared;
shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0
l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC
}
if (pba->has_dr == _TRUE_) delta_dr = delta_ur;
}
/* synchronous metric perturbation eta */
//eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared;
//eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
if (pba->has_gdm == _TRUE_) { // GDM_CLASS
eta = ppr->curvature_ini * (1.0 - (5.+4.*fracnu)/(12.*RnuTerm)*ktau_two);
}
else {
eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om));
}
}
/* isocurvature initial conditions taken from Bucher, Moodely,
Turok 99, with just a different normalization convention for
tau and the scale factor. [k tau] from BMT99 is left invariant
because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99
must be replaced by [frac_i*om*tau/4]. Some doubts remain about
the niv formulas, that should be recheked at some point. We
also checked that for bi,cdi,nid, everything coincides exactly
with the CAMB formulas. */
/** - --> (b.2.) Cold dark matter Isocurvature */
if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
class_test((pba->has_idr == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of interacting dark radiation");
/* GDM_CLASS: new test */
class_test((pba->has_gdm == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of GDM");
class_test(pba->has_cdm == _FALSE_,
ppt->error_message,
"not consistent to ask for CDI in absence of CDM!");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.);
}
/** - --> (b.3.) Baryon Isocurvature */
if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
/* GDM_CLASS: new test */
class_test((pba->has_gdm == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of GDM");
class_test((pba->has_idr == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of interacting dark radiation");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.;
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g];
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) {
delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g];
theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g];
shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.);
}
eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.);
}
/** - --> (b.4.) Neutrino density Isocurvature */
if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NID in absence of ur or ncdm species!");
/* GDM_CLASS: new test */
class_test((pba->has_gdm == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of GDM");
class_test((pba->has_idr == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of interacting dark radiation");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau);
ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two;
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau;
}
delta_ur = ppr->entropy_ini*(1.-ktau_two/6.);
theta_ur = ppr->entropy_ini*k*k*tau/4.;
shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.;
eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two;
}
/** - --> (b.5.) Neutrino velocity Isocurvature */
if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_),
ppt->error_message,
"not consistent to ask for NIV in absence of ur or ncdm species!");
/* GDM_CLASS: new test */
class_test((pba->has_gdm == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of GDM");
class_test((pba->has_idr == _TRUE_),
ppt->error_message,
"only adiabatic ic in presence of interacting dark radiation");
ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg*
(1. - 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k*
(-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.);
ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */
ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g];
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau;
}
delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */
theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.));
shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. + 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */
eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */
}
/** - (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */
if (ppt->gauge == synchronous) {
ppw->pv->y[ppw->pv->index_pt_eta] = eta;
}
/** - (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */
if (ppt->gauge == newtonian) {
/* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). We obtain it from the first two Einstein equations:
alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a)
= [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a)
with
delta_tot = (delta_rho/rho_c)
= [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m)
= [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r)
= [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r)
velocity_tot = ((rho+p)theta/rho_c)
= [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m)
= [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r)
= [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r)
*/
if (pba->has_cdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm];
else if (pba->has_dcdm == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm];
else if (pba->has_idm_dr == _TRUE_)
delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_idm_dr];
else
delta_cdm=0.;
/* GDM_CLASS */
if (pba->has_gdm == _TRUE_){
delta_gdm = ppw->pv->y[ppw->pv->index_pt_delta_gdm];
theta_gdm = ppw->pv->y[ppw->pv->index_pt_theta_gdm];
}
else{
delta_gdm = 0.;
theta_gdm = 0.;
}
/* END GDM_CLASS */
// note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero.
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm+ fracgdm*delta_gdm))/(1.+rho_m_over_rho_r); // GDM_CLASS: additional gdm term
velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b] + rho_m_over_rho_r*fracgdm*(1.+w_gdm)*theta_gdm)/(1.+rho_m_over_rho_r); // GDM_CLASS: additional gdm term
alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a;
ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha;
ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha;
if (pba->has_cdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha;
}
/* GDM_CLASS: gdm fluid */
if (pba->has_gdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_gdm] -= 3*(1.+w_gdm)*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_gdm] += k*k*alpha;
}
if (pba->has_idm_dr == _TRUE_){
ppw->pv->y[ppw->pv->index_pt_delta_idm_dr] -= 3.*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_idm_dr] = k*k*alpha;
/* comment on idm_dr initial conditions: theta_idm_dr is set later, together with theta_idr, if the tight coupling is on */
}
if (pba->has_dcdm == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha;
}
/* fluid */
if ((pba->has_fld == _TRUE_) && (pba->use_ppf == _FALSE_)) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+w_fld)*a_prime_over_a*alpha;
ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha;
}
/* scalar field: check */
if (pba->has_scf == _TRUE_) {
alpha_prime = 0.0;
/* - 2. * a_prime_over_a * alpha + eta
- 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */
ppw->pv->y[ppw->pv->index_pt_phi_scf] += alpha*ppw->pvecback[pba->index_bg_phi_prime_scf];
ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] +=
(-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]
-a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha
+ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime);
}
if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_) || (pba->has_idr == _TRUE_)) {
delta_ur -= 4.*a_prime_over_a*alpha;
theta_ur += k*k*alpha;
/* shear and l3 are gauge invariant */
if (pba->has_dr == _TRUE_)
delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha;
}
} /* end of gauge transformation to newtonian gauge */
/** - (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */
if (pba->has_ur == _TRUE_) {
ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur;
ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur;
ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur;
ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur;
}
if (pba->has_idr == _TRUE_){
ppw->pv->y[ppw->pv->index_pt_delta_idr] = delta_ur;
ppw->pv->y[ppw->pv->index_pt_theta_idr] = theta_ur;
if (ppt->idr_nature == idr_free_streaming){
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))){
ppw->pv->y[ppw->pv->index_pt_shear_idr] = shear_ur;
ppw->pv->y[ppw->pv->index_pt_l3_idr] = l3_ur;
}
}
}
if (pba->has_idm_dr == _TRUE_){
ppw->pv->y[ppw->pv->index_pt_theta_idm_dr] = theta_ur;
}
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) {
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
//Jump to next momentum bin:
idx += (ppw->pv->l_max_ncdm[n_ncdm]+1);
}
}
}
if (pba->has_dr == _TRUE_) {
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr];
ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr;
ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr;
}
}
/** --> For tensors */
if (_tensors_) {
/** tensor initial conditions take into account the fact that
scalar (resp. tensor) \f$ C_l\f$'s are related to the real space
power spectrum of curvature (resp. of the tensor part of
metric perturbations)
\f[ <R(x) R(x)> \ \ \sum_{ij} <h_{ij}(x) h^{ij}(x)> \f]
In momentum space it is conventional to use the modes R(k)
and h(k) where the quantity h obeying to the equation of
propagation:
\f[ h'' + \frac{2a'}{a} h + [k2+2K] h = 12\pi Ga2 (\rho+p) \sigma = 8\pi Ga2 p \pi \f]
and the power spectra in real space and momentum space are related through:
\f[ <R(x) R(x)> = \int \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} <R(k)R(k)^*>\right] = \int \frac{dk}{k} \mathcal{P}_R(k) \f]
\f[\sum_{ij} <h_{ij}(x) h^{ij}(x)> = \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} F\left(\frac{k^2}{K}\right) <h(k)h(k)^*>\right] = \int \frac{dk}{k} F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f]
where \f$ \mathcal{P}_R\f$ and \f$ \mathcal{P}_h\f$ are the dimensionless spectrum of
curvature R, and F is a function of k2/K, where K is the curvature
parameter. F is equal to one in flat space (K=0), and coming
from the contraction of the laplacian eigentensor \f$ Q_{ij}\f$ with
itself. We will give F explicitly below.
Similarly the scalar (S) and tensor (T) \f$ C_l\f$'s are given by
\f[ C_l^S = 4\pi \int \frac{dk}{k} [\Delta_l^S(q)]^2 \mathcal{P}_R(k) \f]
\f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f]
The usual convention for the tensor-to-scalar ratio
\f$ r = A_t / A_s \f$ at pivot scale
= 16 epsilon in single-field inflation
is such that for constant \f$ \mathcal{P}_R(k)\f$ and \f$ \mathcal{P}_h(k)\f$,
\f[ r = 6 \frac{\mathcal{P}_h(k)}{\mathcal{P}_R(k)} \f]
so
\f[ \mathcal{P}_h(k) = \frac{\mathcal{P}_R(k) r}{6} = \frac{A_s r}{6} = \frac{A_t}{6} \f]
A priori it would make sense to say that for a power-law
primordial spectrum there is an extra factor \f$ (k/k_{pivot})^{n_t} \f$
(and eventually running and so on and so forth...)
However it has been shown that the minimal models of
inflation in a negatively curved bubble lead to
\f$ \mathcal{P}_h(k)=\tanh(\pi*\nu/2)\f$. In open models it is customary to
define the tensor tilt in a non-flat universe as a deviation
from this behavior rather than from true scale-invariance in
the above sense.
Hence we should have
\f[ \mathcal{P}_h(k) = \frac{A_t}{6} [ \tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)}\f]
where the brackets \f[ [...] \f] mean "if K<0"
Then
\f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \frac{A_t}{6} [\tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)} \f]
In the code, it is then a matter of choice to write:
- In the primordial module: \f$ \mathcal{P}_h(k) = \frac{A_t}{6} \tanh{(\pi*\frac{\nu}{2})} (k/k^*)^{n_T}\f$
- In the perturbation initial conditions: \f$ h = 1\f$
- In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f$
or:
- In the primordial module: \f$ \mathcal{P}_h(k) = A_t (k/k^*)^{n_T} \f$
- In the perturbation initial conditions: \f$ h = \sqrt{[F\left(\frac{k^2}{K}\right) / 6] \tanh{(\pi*\frac{\nu}{2})}} \f$
- In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 \mathcal{P}_h(k) \f$
We choose this last option, such that the primordial and
spectra module differ minimally in flat and non-flat space. Then we must impose
\f[ h = \sqrt{\left(\frac{F}{6}\right) \tanh{(\pi*\frac{\nu}{2})}} \f]
The factor F is found to be given by:
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{k2(k2-K)}{(k2+3K)(k2+2K)} \mathcal{P}_h(k) \f]
Introducing as usual \f$ q2 = k2 - 3K \f$ and using qdq = kdk this gives
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{(q2-3K)(q2-4K)}{q2(q2-K)} \mathcal{P}_h(k) \f]
Using qdq = kdk this is equivalent to
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dq}{q} \frac{q2-4K}{q2-K} \mathcal{P}_h(k(q)) \f]
Finally, introducing \f$ \nu=q/\sqrt{|K|}\f$ and sgnK=SIGN(k)\f$=\pm 1\f$, this could also be written
\f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{d\nu}{\nu} \frac{(\nu2-4sgnK)}{(\nu2-sgnK)} \mathcal{P}_h(k(\nu)) \f]
Equation (43,44) of Hu, Seljak, White, Zaldarriaga is
equivalent to absorbing the above factor
\f$ (\nu2-4sgnK)/(\nu2-sgnK)\f$ in the definition of the primordial
spectrum. Since the initial condition should be written in terms of k rather than nu, they should read
\f[ h = \sqrt{ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * \tanh{(\pi*\frac{\nu}{2})} } \f]
We leave the freedom to multiply by an arbitrary number
ppr->gw_ini. The standard convention corresponding to
standard definitions of r, \f$ A_T\f$, \f$ n_T\f$ is however ppr->gw_ini=1.
*
*/
if (index_ic == ppt->index_ic_ten) {
ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_;
}
k2 = k*k;
if (pba->sgnK != 0) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K));
}
if (pba->sgnK == -1) {
if (k*k+3*pba->K >= 0.) {
ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K)));
}
else {
ppw->pv->y[ppw->pv->index_pt_gw] = 0.;
}
}
}
return _SUCCESS_;
}
/**
* Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations.
*
* Evaluate background quantities at \f$ \tau \f$, as well as thermodynamics for scalar mode; infer useful flags and time scales for integrating the perturbations:
* - check whether tight-coupling approximation is needed.
* - check whether radiation (photons, massless neutrinos...) perturbations are needed.
* - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale, where min_time_scale = smallest time scale involved in the equations. There are three time scales to compare:
* -# that of recombination, \f$ \tau_c = 1/\kappa' \f$
* -# Hubble time scale, \f$ \tau_h = a/a' \f$
* -# Fourier mode, \f$ \tau_k = 1/k \f$
*
* So, in general, min_time_scale = \f$ \min(\tau_c, \tau_b, \tau_h, \tau_k) \f$.
*
* However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c
* \ll \tau_k \f$, we can use the tight-coupling regime for photons
* and write equations in such way that the time scale \f$
* \tau_c \f$ becomes irrelevant (no effective mass term in \f$
* 1/\tau_c \f$). Then, the smallest
* scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$.
* In practice, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$.
*
* Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg
* aH \f$, we can switch off radiation perturbations (i.e. switch on
* the free-streaming approximation) and then the smallest scale is
* simply \f$ \tau_h \f$.
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param ppw Input/Output: in output contains the approximation to be used at this time
* @return the error status
*/
int perturb_approximations(
                           struct precision * ppr,
                           struct background * pba,
                           struct thermo * pth,
                           struct perturbs * ppt,
                           int index_md,
                           double k,
                           double tau,
                           struct perturb_workspace * ppw
                           ) {

  /** Summary: */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  /* k=0 would make tau_k = 1/k below undefined */
  class_test(k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback),
             pba->error_message,
             ppt->error_message);

  class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0.,
             ppt->error_message,
             "aH=0, stop to avoid division by zero");

  /* conformal Hubble time scale: tau_h = 1/(aH) */
  tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]);

  /** - for scalar modes: */

  if (_scalars_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      class_test(tau_c < 0.,
                 ppt->error_message,
                 "tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. (This could come from the interpolation of a too poorly sampled reionisation history?).\n",
                 tau_c,
                 1./ppw->pvecback[pba->index_bg_a]-1.,
                 tau,
                 ppw->pvecthermo[pth->index_th_xe]);

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on */

      /* tca is on only when tau_c is much smaller than BOTH tau_h and tau_k
         (thresholds set by the two precision triggers) */
      if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
          (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
        ppw->approx[ppw->index_ap_tca] = (int)tca_on;
      }
      else {
        ppw->approx[ppw->index_ap_tca] = (int)tca_off;
      }
    }

    /* tight-coupling approximation for interacting dark matter - dark
       radiation (idm_dr): if the scattering rate dmu_idm_dr vanishes,
       the approximation must be off */
    if (pba->has_idm_dr == _TRUE_){
      if (ppw->pvecthermo[pth->index_th_dmu_idm_dr] == 0.){
        ppw->approx[ppw->index_ap_tca_idm_dr] = (int)tca_idm_dr_off;
      }
      else{
        /* sanity check: the idm_dr coupling time scale 1/dmu_idm_dr must be positive */
        class_test(1./ppw->pvecthermo[pth->index_th_dmu_idm_dr] < 0.,
                   ppt->error_message,
                   "negative tau_idm_dr=1/dmu_idm_dr=%e at z=%e, conformal time=%e.\n",
                   1./ppw->pvecthermo[pth->index_th_dmu_idm_dr],
                   1./ppw->pvecback[pba->index_bg_a]-1.,
                   tau);
        /* idm_dr tight coupling is on only when the coupling time scale
           (1/dmu_idm_dr) is below the trigger fractions of both tau_h and
           tau_k, AND the interaction rate index satisfies nindex_idm_dr>=2,
           AND the dark radiation is free-streaming */
        if ((1./tau_h/ppw->pvecthermo[pth->index_th_dmu_idm_dr] < ppr->idm_dr_tight_coupling_trigger_tau_c_over_tau_h) &&
            (1./tau_k/ppw->pvecthermo[pth->index_th_dmu_idm_dr] < ppr->idm_dr_tight_coupling_trigger_tau_c_over_tau_k) &&
            (pth->nindex_idm_dr>=2) && (ppt->idr_nature == idr_free_streaming)) {
          ppw->approx[ppw->index_ap_tca_idm_dr] = (int)tca_idm_dr_on;
        }
        else{
          ppw->approx[ppw->index_ap_tca_idm_dr] = (int)tca_idm_dr_off;
        }
      }
    }

    /** - --> (c) free-streaming approximations */

    /* photon (and ur) radiation streaming approximation: on when the mode is
       deep inside the Hubble radius (tau/tau_k large), free streaming has
       started, and the approximation is not globally disabled */
    if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
        (tau > pth->tau_free_streaming) &&
        (ppr->radiation_streaming_approximation != rsa_none)) {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
    }
    else {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
    }

    /* interacting dark radiation free streaming approximation */
    if (pba->has_idr == _TRUE_){
      if(pba->has_idm_dr==_TRUE_){
        /* with idm_dr present, free streaming additionally requires nindex_idm_dr>=2 */
        if ((tau/tau_k > ppr->idr_streaming_trigger_tau_over_tau_k) &&
            ((tau > pth->tau_idr_free_streaming) && (pth->nindex_idm_dr>=2)) &&
            (ppr->idr_streaming_approximation != rsa_idr_none)){
          ppw->approx[ppw->index_ap_rsa_idr] = (int)rsa_idr_on;
        }
        else{
          ppw->approx[ppw->index_ap_rsa_idr] = (int)rsa_idr_off;
        }
      }
      else{
        if ((tau/tau_k > ppr->idr_streaming_trigger_tau_over_tau_k) &&
            (tau > pth->tau_idr_free_streaming) &&
            (ppr->idr_streaming_approximation != rsa_idr_none)){
          ppw->approx[ppw->index_ap_rsa_idr] = (int)rsa_idr_on;
        }
        else{
          ppw->approx[ppw->index_ap_rsa_idr] = (int)rsa_idr_off;
        }
      }
    }

    /* ultra-relativistic (massless neutrino) fluid approximation */
    if (pba->has_ur == _TRUE_) {
      if ((tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) &&
          (ppr->ur_fluid_approximation != ufa_none)) {
        ppw->approx[ppw->index_ap_ufa] = (int)ufa_on;
      }
      else {
        ppw->approx[ppw->index_ap_ufa] = (int)ufa_off;
      }
    }

    /* non-cold dark matter fluid approximation */
    if (pba->has_ncdm == _TRUE_) {
      if ((tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) &&
          (ppr->ncdm_fluid_approximation != ncdmfa_none)) {
        ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_on;
      }
      else {
        ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_off;
      }
    }
  }

  /** - for tensor modes: */

  if (_tensors_) {

    /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */

    class_call(thermodynamics_at_z(pba,
                                   pth,
                                   1./ppw->pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                   ppw->inter_mode,
                                   &(ppw->last_index_thermo),
                                   ppw->pvecback,
                                   ppw->pvecthermo),
               pth->error_message,
               ppt->error_message);

    /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */

    if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) {
      ppw->approx[ppw->index_ap_tca] = (int)tca_off;
    }

    /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */

    else {

      /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

      tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa];

      /** - ----> (b.2.b) check whether tight-coupling approximation should be on */

      if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) &&
          (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) {
        ppw->approx[ppw->index_ap_tca] = (int)tca_on;
      }
      else {
        ppw->approx[ppw->index_ap_tca] = (int)tca_off;
      }
    }

    /* radiation streaming approximation for tensors, same triggers as for scalars */
    if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) &&
        (tau > pth->tau_free_streaming) &&
        (ppr->radiation_streaming_approximation != rsa_none)) {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_on;
    }
    else {
      ppw->approx[ppw->index_ap_rsa] = (int)rsa_off;
    }
  }

  return _SUCCESS_;
}
/**
* Compute typical timescale over which the perturbation equations
 * vary. Some integrators (e.g. Runge-Kutta) benefit from calling this
* routine at each step in order to adapt the next step.
*
* This is one of the few functions in the code which is passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know the content of this pointer.
* - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic
* error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param parameters_and_workspace Input: fixed parameters (e.g. indices), workspace, approximation used, etc.
* @param timescale Output: perturbation variation timescale (given the approximation used)
 * @param error_message            Output: error message
 * @return the error status
 */
int perturb_timescale(
                      double tau,
                      void * parameters_and_workspace,
                      double * timescale,
                      ErrorMsg error_message
                      ) {

  /** Summary: */

  /** - define local variables */

  /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */
  double tau_k;
  /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */
  double tau_h;
  /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */
  double tau_c;

  /* various pointers allowing to extract the fields of the
     parameter_and_workspace input structure */
  struct perturb_parameters_and_workspace * pppaw;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;

  /** - extract the fields of the parameter_and_workspace input structure */
  /* cast the generic void pointer (required by the generic_integrator
     interface) back to the expected structure */
  pppaw = parameters_and_workspace;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;

  /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */

  class_test(pppaw->k == 0.,
             ppt->error_message,
             "stop to avoid division by zero");

  tau_k = 1./pppaw->k;

  /** - evaluate background quantities with background_at_tau() and
      Hubble time scale \f$ \tau_h = a/a' \f$ */

  class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback),
             pba->error_message,
             error_message);

  class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0.,
             error_message,
             "aH=0, stop to avoid division by zero");

  tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]);

  /** - for scalar modes: */

  if ((ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars)) {

    /* start from the Hubble time scale ... */
    *timescale = tau_h;

    /* ... include tau_k when radiation perturbations are still evolved
       (rsa off) or when massive neutrinos are present ... */
    if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_))
      *timescale = MIN(tau_k,*timescale);

    /* ... and include tau_c when photons are not tightly coupled */
    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     ppw->inter_mode,
                                     &(ppw->last_index_thermo),
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 error_message);

      if (pvecthermo[pth->index_th_dkappa] != 0.) {

        /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

        tau_c = 1./pvecthermo[pth->index_th_dkappa];

        *timescale = MIN(tau_c,*timescale);
      }
    }
  }

  /** - for vector modes: */

  if ((ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors)) {

    *timescale = MIN(tau_h,tau_k);

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     ppw->inter_mode,
                                     &(ppw->last_index_thermo),
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 error_message);

      if (pvecthermo[pth->index_th_dkappa] != 0.) {

        /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

        tau_c = 1./pvecthermo[pth->index_th_dkappa];

        *timescale = MIN(tau_c,*timescale);
      }
    }
  }

  /** - for tensor modes: */

  if ((ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors)) {

    *timescale = MIN(tau_h,tau_k);

    if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {

      class_call(thermodynamics_at_z(pba,
                                     pth,
                                     1./pvecback[pba->index_bg_a]-1.,  /* redshift z=1/a-1 */
                                     ppw->inter_mode,
                                     &(ppw->last_index_thermo),
                                     pvecback,
                                     pvecthermo),
                 pth->error_message,
                 error_message);

      if (pvecthermo[pth->index_th_dkappa] != 0.) {

        /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */

        tau_c = 1./pvecthermo[pth->index_th_dkappa];

        *timescale = MIN(tau_c,*timescale);
      }
    }
  }

  return _SUCCESS_;
}
/**
* Compute metric perturbations (those not integrated over time) using Einstein equations
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to the perturbation structure
* @param index_md Input: index of mode under consideration (scalar/.../tensor)
* @param k Input: wavenumber
* @param tau Input: conformal time
* @param y Input: vector of perturbations (those integrated over time) (already allocated)
* @param ppw Input/Output: in output contains the updated metric perturbations
* @return the error status
*/
int perturb_einstein(
                     struct precision * ppr,
                     struct background * pba,
                     struct thermo * pth,
                     struct perturbs * ppt,
                     int index_md,
                     double k,
                     double tau,
                     double * y,
                     struct perturb_workspace * ppw
                     ) {
  /** Summary: */

  /** - define local variables */

  double k2,a,a2,a_prime_over_a;
  double s2_squared;
  double shear_g = 0.;
  double shear_idr = 0.;
  double shear_gdm=0., w_gdm=0., cv2_gdm=0.; // GDM_CLASS

  /** - define wavenumber and scale factor related quantities */

  k2 = k*k;
  a = ppw->pvecback[pba->index_bg_a];
  a2 = a * a;
  a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
  /* curvature factor s_2^2 = 1 - 3K/k^2 */
  s2_squared = 1.-3.*pba->K/k2;

  /** - sum up perturbations from all species */
  class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw),
             ppt->error_message,
             ppt->error_message);

  /** - for scalar modes: */

  if (_scalars_) {

    /* GDM_CLASS: background equation of state and (scale- and time-dependent)
       sound speed of the generalized dark matter fluid */
    if (pba->has_gdm == _TRUE_) {
      w_gdm = ppw->pvecback[pba->index_bg_w_gdm];
      cv2_gdm = cv2_gdm_of_a_and_k(pba,a,k);
    }

    /** - --> infer metric perturbations from Einstein equations */

    /* newtonian gauge */
    if (ppt->gauge == newtonian) {

      /* GDM_CLASS:
         Define the algebraic shear patterned after the dynamic shear, constant w.
         Newtonian gauge needs to numerically agree with the sync version, because it is
         gauge inv. Need to produce the gdm contribution to rho_plus_p_shear here because
         because we don't want to call from perturb_total_stress_energy
         perturbed_einstein, because perturbed_einstein calls
         perturbed_total_stress_energy in order to calculate mt quantities in
         perturbed_einstein. We treat the dynamical shear in perturb_total_stress_energy
      */
      if ((pba->has_gdm == _TRUE_) && (ppt->dynamic_shear_gdm == _FALSE_)) {
        ppw->pvecmetric[ppw->index_mt_shear_gdm] = 8.* cv2_gdm /(15.*(1.+w_gdm)*a_prime_over_a)*y[ppw->pv->index_pt_theta_gdm];
        shear_gdm = ppw->pvecmetric[ppw->index_mt_shear_gdm];
        ppw->rho_plus_p_shear += (1.+w_gdm)*ppw->pvecback[pba->index_bg_rho_gdm]*shear_gdm;
      }

      /* in principle we could get phi from the constrain equation:
         ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta);
         with s2_squared = sqrt(1-3K/k2) = ppw->s_l[2]*ppw->s_l[2]
         This was the case in class v1.3. However the integration is
         more stable is we treat phi as a dynamical variable
         y[ppw->pv->index_pt_phi], which derivative is given by the
         second equation below (credits to Guido Walter Pettinari). */

      /* equation for psi */
      ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;

      /* equation for phi' */
      ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta;

      /* eventually, infer radiation streaming approximation for
         gamma and ur (this is exactly the right place to do it
         because the result depends on h_prime) */

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

        class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);
      }

      /* NOTE(review): tested as (pba->has_idr) rather than
         (pba->has_idr == _TRUE_) as elsewhere in this file; relies on
         _TRUE_ being nonzero — confirm intentional */
      if ((pba->has_idr)&&(ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_on)){

        class_call(perturb_rsa_idr_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);
      }
    }

    /* synchronous gauge */
    if (ppt->gauge == synchronous) {

      /* first equation involving total density fluctuation */
      ppw->pvecmetric[ppw->index_mt_h_prime] =
        ( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a);  /* h' */

      /* eventually, infer radiation streaming approximation for
         gamma and ur (this is exactly the right place to do it
         because the result depends on h_prime) */

      if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {

        class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);
      }

      if ((pba->has_idr==_TRUE_)&&(ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_on)) {

        class_call(perturb_rsa_idr_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw),
                   ppt->error_message,
                   ppt->error_message);

        /* after fixing theta_idr via the rsa, add its contribution to the total
           momentum density before it is used in the eta' equation below */
        ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_idr]*ppw->rsa_theta_idr;
      }

      /* second equation involving total velocity */
      ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared;  /* eta' */

      /* third equation involving total pressure */
      ppw->pvecmetric[ppw->index_mt_h_prime_prime] =
        - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime]
        + 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta]
        - 9. * a2 * ppw->delta_p;

      /* alpha = (h'+6eta')/2k^2 */
      ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2;

      /* GDM_CLASS: define the algebraic shear, patterned after the dynamic shear, for
         constant w. We need to add the gdm contribution of rho_plus_p_shear here because
         we don't want to call perturbed_einstein from perturb_total_stress_energy */
      if((pba->has_gdm == _TRUE_) && (ppt->dynamic_shear_gdm == _FALSE_)){
        ppw->pvecmetric[ppw->index_mt_shear_gdm] = 8.* cv2_gdm /(15.*(1.+w_gdm)*a_prime_over_a)*(y[ppw->pv->index_pt_theta_gdm] + ppw->pvecmetric[ppw->index_mt_alpha]*k2);
        shear_gdm = ppw->pvecmetric[ppw->index_mt_shear_gdm];
        ppw->rho_plus_p_shear += (1.+w_gdm)*ppw->pvecback[pba->index_bg_rho_gdm]*shear_gdm;
      }

      /* eventually, infer first-order tight-coupling approximation for photon
         shear, then correct the total shear */
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) {

        shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);

        ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g;

      }

      /* same first-order tight-coupling correction for the idm_dr shear */
      if ((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_on)){

        shear_idr = 0.5*8./15./ppw->pvecthermo[pth->index_th_dmu_idm_dr]/ppt->alpha_idm_dr[0]*(y[ppw->pv->index_pt_theta_idr]+k2*ppw->pvecmetric[ppw->index_mt_alpha]);

        ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_idr]*shear_idr;
      }

      /* fourth equation involving total shear */
      ppw->pvecmetric[ppw->index_mt_alpha_prime] =  //TBC
        - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha]
        + y[ppw->pv->index_pt_eta]
        - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;

    }

    /* transform (delta_m, theta_m) of the current gauge into
       gauge-independent variables (you could comment this out if you
       really want gauge-dependent results) */

    if (ppt->has_source_delta_m == _TRUE_) {

      /* GDM_CLASS: additional (1 + w_matter) factor required */
      if (pba->has_gdm == _TRUE_) {
        /* rebuild the total "matter" density and pressure including gdm, so
           that the gauge transformation uses (1 + P_m/rho_m) instead of 1 */
        double rho_m = ppw->pvecback[pba->index_bg_rho_b];
        double P_m = 0.;
        int n_ncdm;
        rho_m += ppw->pvecback[pba->index_bg_rho_gdm];
        P_m += w_gdm*ppw->pvecback[pba->index_bg_rho_gdm];
        if (pba->has_cdm == _TRUE_) {
          rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
        }
        if (pba->has_dcdm == _TRUE_) {
          rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
        }
        if (pba->has_ncdm == _TRUE_) {
          for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {
            rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
            P_m += ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
          }
        }
        ppw->delta_m += 3. * (1. + P_m/rho_m) * ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;
      }
      /* END GDM_CLASS */
      else {
        ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;
        // note: until 2.4.3 there was a typo, the factor was (-2 H'/H) instead
        // of (3 aH). There is the same typo in the CLASSgal paper
        // 1307.1459v1,v2,v3. It came from a confusion between (1+w_total)
        // and (1+w_matter)=1 [the latter is the relevant one here].
        //
        // note2: at this point this gauge-invariant variable is only
        // valid if all matter components are pressureless and
        // stable. This relation will be generalized soon to the case
        // of decaying dark matter.
      }
    }

    if (ppt->has_source_delta_cb == _TRUE_) {
      ppw->delta_cb += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_cb/k2;  //check gauge transformation
    }

    if (ppt->has_source_theta_m == _TRUE_) {
      if (ppt->gauge == synchronous) {
        ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2;
      }
    }

    if (ppt->has_source_theta_cb == _TRUE_){
      if (ppt->gauge == synchronous) {
        ppw->theta_cb += ppw->pvecmetric[ppw->index_mt_alpha]*k2;  //check gauge transformation
      }
    }
  }

  /** - for vector modes */

  if (_vectors_) {

    if (ppt->gauge == newtonian) {

      ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k;
    }

    if (ppt->gauge == synchronous) {

      // assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)}
      // from Hu and White:
      ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2;
      // what we suspect:
      //ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi;
      // if we use the other equation:
      //ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v;
    }
  }

  /** - for tensor modes */

  if (_tensors_) {

    /* single einstein equation for tensor perturbations */
    ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source;
  }

  return _SUCCESS_;
}
/* GDM_CLASS: take care if perturb_total_stress_energy is called from
   functions other than perturb_einstein: the algebraic (non-dynamic) gdm
   fluid shear contribution is only added in perturb_einstein */
int perturb_total_stress_energy(
struct precision * ppr,
struct background * pba,
struct thermo * pth,
struct perturbs * ppt,
int index_md,
double k,
double * y,
struct perturb_workspace * ppw
) {
/** Summary: */
/** - define local variables */
double a,a2,a_prime_over_a,k2;
double rho_plus_p_tot=0.;
double rho_m=0.;
double delta_rho_m=0.;
double rho_plus_p_m=0.;
double rho_plus_p_theta_m=0.;
double delta_g=0.;
double theta_g=0.;
double shear_g=0.;
double delta_ur=0.;
double theta_ur=0.;
double shear_ur=0.;
double delta_idr=0.;
double theta_idr=0.;
double shear_idr=0.;
double rho_delta_ncdm=0.;
double rho_plus_p_theta_ncdm=0.;
double rho_plus_p_shear_ncdm=0.;
double delta_p_ncdm=0.;
double factor;
double rho_plus_p_ncdm;
int index_q,n_ncdm,idx;
double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm;
double w_fld,dw_over_da_fld,integral_fld;
double gwncdm;
double rho_relativistic;
double rho_dr_over_f;
double delta_rho_scf, delta_p_scf, psi;
/** Variables used for FLD and PPF */
double c_gamma_k_H_square;
double Gamma_prime_plus_a_prime_over_a_Gamma, s2sq=1.;
double w_prime_fld, ca2_fld;
double alpha, alpha_prime, metric_euler;
double rho_t, p_t, rho_t_prime, p_t_prime;
double rho_fld, p_fld, rho_fld_prime, p_fld_prime;
double X, Y, Z, X_prime, Y_prime, Z_prime;
double Gamma_fld, S, S_prime, theta_t, theta_t_prime, rho_plus_p_theta_fld_prime;
double delta_p_b_over_rho_b;
/** - wavenumber and scale factor related quantities */
a = ppw->pvecback[pba->index_bg_a];
a2 = a * a;
a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a;
k2 = k*k;
/** - for scalar modes */
if (_scalars_) {
/** - --> (a) deal with approximation schemes */
/** - ---> (a.1.) photons */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - ----> (a.1.1.) no approximation */
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
shear_g = y[ppw->pv->index_pt_shear_g];
}
else {
/** - ----> (a.1.2.) radiation streaming approximation */
delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */
shear_g = 0.; /* shear always neglected in radiation streaming approximation */
}
}
else {
/** - ----> (a.1.3.) tight coupling approximation */
delta_g = y[ppw->pv->index_pt_delta_g];
theta_g = y[ppw->pv->index_pt_theta_g];
/* first-order tight-coupling approximation for photon shear */
if (ppt->gauge == newtonian) {
shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g];
}
else {
shear_g = 0.; /* in the synchronous gauge, the expression of
shear_g (at first-order in a tight-coupling
expansion) is a function of h' and eta'; but h'
and eta' are calculated in perturb_einstein()
as a function of delta_g and theta_g. Hence,
we set shear_g temporarily to zero, and set it
to the right first-order value in
perturb_einstein(), just before using the
Einstein equation for the shear. */
}
}
/** - ---> (a.2.) ur */
if (pba->has_ur == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_ur = y[ppw->pv->index_pt_delta_ur];
theta_ur = y[ppw->pv->index_pt_theta_ur];
shear_ur = y[ppw->pv->index_pt_shear_ur];
}
else {
delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */
shear_ur = 0.; /* shear always neglected in free streaming approximation */
}
}
/** - ---> (a.3.) baryon pressure perturbation */
if ((ppt->has_perturbed_recombination == _TRUE_) &&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)) {
delta_p_b_over_rho_b = ppw->pvecthermo[pth->index_th_wb]*(y[ppw->pv->index_pt_delta_b]+ y[ppw->pv->index_pt_perturbed_recombination_delta_temp]);
}
else {
delta_p_b_over_rho_b = ppw->pvecthermo[pth->index_th_cb2]*y[ppw->pv->index_pt_delta_b];
}
/** - ---> (a.4.) interacting dark radiation */
if (pba->has_idr == _TRUE_) {
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off) {
delta_idr = y[ppw->pv->index_pt_delta_idr];
theta_idr = y[ppw->pv->index_pt_theta_idr];
if (ppt->idr_nature == idr_free_streaming){
if((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_on)){
if(ppt->gauge == newtonian)
shear_idr = 0.5*(8./15./ppw->pvecthermo[pth->index_th_dmu_idm_dr]/ppt->alpha_idm_dr[0]*(y[ppw->pv->index_pt_theta_idr]));
else
shear_idr = 0.; /* this is set in perturb_einstein, so here it's set to 0 */
}
else{
shear_idr = y[ppw->pv->index_pt_shear_idr];
}
}
}
else{
delta_idr = 0.;
theta_idr = 0.;
shear_idr = 0.;
}
}
/** - --> (b) compute the total density, velocity and shear perturbations */
/* photon and baryon contribution */
ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g
+ ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; // contribution to total perturbed stress-energy
ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g
+ ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b]; // contribution to total perturbed stress-energy
ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g; // contribution to total perturbed stress-energy
ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g
+ ppw->pvecback[pba->index_bg_rho_b]*delta_p_b_over_rho_b; // contribution to total perturbed stress-energy
ppw->rho_plus_p_tot = 4./3. * ppw->pvecback[pba->index_bg_rho_g] + ppw->pvecback[pba->index_bg_rho_b];
if (ppt->has_source_delta_m == _TRUE_) {
delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; // contribution to delta rho_matter
rho_m = ppw->pvecback[pba->index_bg_rho_b];
}
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b]; // contribution to [(rho+p)theta]_matter
rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b];
}
/* cdm contribution */
if (pba->has_cdm == _TRUE_) {
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm]; // contribution to total perturbed stress-energy
if (ppt->gauge == newtonian)
ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm]; // contribution to total perturbed stress-energy
ppw->rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_cdm];
if (ppt->has_source_delta_m == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm]; // contribution to delta rho_matter
rho_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
if (ppt->gauge == newtonian)
rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm]; // contribution to [(rho+p)theta]_matter
rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm];
}
}
/* idm_dr contribution */
if (pba->has_idm_dr == _TRUE_) {
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_idm_dr]*y[ppw->pv->index_pt_delta_idm_dr];
ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_idm_dr]*y[ppw->pv->index_pt_theta_idm_dr];
rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_idm_dr];
}
/* dcdm contribution */
if (pba->has_dcdm == _TRUE_) {
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm];
ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm];
ppw->rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_dcdm];
if (ppt->has_source_delta_m == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm]; // contribution to delta rho_matter
rho_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm]; // contribution to [(rho+p)theta]_matter
rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm];
}
}
/* ultra-relativistic decay radiation */
if (pba->has_dr == _TRUE_) {
/* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the
convention in astro-ph/9907388 and f is defined as
f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
rho_dr_over_f = pow(pba->H0/a2,2);
ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1];
ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2];
ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr];
ppw->rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_dr];
}
/* ultra-relativistic neutrino/relics contribution */
if (pba->has_ur == _TRUE_) {
ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur;
ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur;
ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur;
ppw->rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_ur];
}
/* interacting dark radiation */
if (pba->has_idr == _TRUE_) {
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_idr]*delta_idr;
ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_idr]*theta_idr;
if (ppt->idr_nature==idr_free_streaming)
ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_idr]*shear_idr;
ppw->delta_p += 1./3. * ppw->pvecback[pba->index_bg_rho_idr]*delta_idr;
rho_plus_p_tot += 4./3. * ppw->pvecback[pba->index_bg_rho_idr];
}
/* infer delta_cb abd theta_cb (perturbations from CDM and baryons) before adding ncdm */
if ((ppt->has_source_delta_m == _TRUE_) && (ppt->has_source_delta_cb == _TRUE_))
ppw->delta_cb = delta_rho_m/rho_m;
if (((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) &&
((ppt->has_source_delta_cb == _TRUE_) || (ppt->has_source_theta_cb == _TRUE_)))
ppw->theta_cb = rho_plus_p_theta_m/rho_plus_p_m;
/* non-cold dark matter contribution */
if (pba->has_ncdm == _TRUE_) {
idx = ppw->pv->index_pt_psi0_ncdm1;
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
// The perturbations are evolved integrated:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg;
w_ncdm = p_ncdm_bg/rho_ncdm_bg;
cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
ppw->delta_ncdm[n_ncdm] = y[idx];
ppw->theta_ncdm[n_ncdm] = y[idx+1];
ppw->shear_ncdm[n_ncdm] = y[idx+2];
}
ppw->delta_rho += rho_ncdm_bg*y[idx];
ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1];
ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2];
ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx];
ppw->rho_plus_p_tot += rho_plus_p_ncdm;
idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
}
}
else{
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_delta_ncdm = 0.0;
rho_plus_p_theta_ncdm = 0.0;
rho_plus_p_shear_ncdm = 0.0;
delta_p_ncdm = 0.0;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
rho_delta_ncdm *= factor;
rho_plus_p_theta_ncdm *= k*factor;
rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
delta_p_ncdm *= factor/3.;
if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) {
ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
(ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
}
ppw->delta_rho += rho_delta_ncdm;
ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm;
ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm;
ppw->delta_p += delta_p_ncdm;
ppw->rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
}
}
if (ppt->has_source_delta_m == _TRUE_) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm]; // contribution to delta rho_matter
rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
}
}
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])
*ppw->theta_ncdm[n_ncdm]; // contribution to [(rho+p)theta]_matter
rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
}
}
}
/* scalar field contribution.
In Newtonian gauge, delta_scf depends on the metric perturbation psi which is inferred
from rho_plus_p_shear. So the contribution from the scalar field must be below all
species with non-zero shear.
*/
if (pba->has_scf == _TRUE_) {
if (ppt->gauge == synchronous){
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
delta_p_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
- ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
}
else{
/* equation for psi */
psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear;
delta_rho_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
+ ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
delta_p_scf = 1./3.*
(1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
- ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
- 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);
}
ppw->delta_rho += delta_rho_scf;
ppw->rho_plus_p_theta += 1./3.*
k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];
ppw->delta_p += delta_p_scf;
ppw->rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_scf]+ppw->pvecback[pba->index_bg_p_scf];
}
/* add your extra species here */
/* GDM_CLASS: gdm contribution */
if (pba->has_gdm == _TRUE_) {
double w_gdm = ppw->pvecback[pba->index_bg_w_gdm];
double ca2_gdm = ppw->pvecback[pba->index_bg_ca2_gdm];
double cs2_gdm = cs2_gdm_of_a_and_k(pba,a,k,ppw);
ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_gdm]*y[ppw->pv->index_pt_delta_gdm];
ppw->rho_plus_p_theta += (1.+w_gdm)*ppw->pvecback[pba->index_bg_rho_gdm]*y[ppw->pv->index_pt_theta_gdm];
ppw->delta_p += (
cs2_gdm * ppw->pvecback[pba->index_bg_rho_gdm]*y[ppw->pv->index_pt_delta_gdm]
+ 3./k/k*a*ppw->pvecback[pba->index_bg_H]*(1.+w_gdm)*(cs2_gdm - ca2_gdm)*ppw->pvecback[pba->index_bg_rho_gdm]*y[ppw->pv->index_pt_theta_gdm]);
if(ppt->dynamic_shear_gdm == _TRUE_) {
double shear_gdm = y[ppw->pv->index_pt_shear_gdm];
ppw->rho_plus_p_shear += (1.+w_gdm)*ppw->pvecback[pba->index_bg_rho_gdm]*shear_gdm;
}
ppw->rho_plus_p_tot += ppw->pvecback[pba->index_bg_rho_gdm];
if (ppt->has_source_delta_m == _TRUE_) {
delta_rho_m += ppw->pvecback[pba->index_bg_rho_gdm]*y[ppw->pv->index_pt_delta_gdm]; // contribution to delta rho_matter
rho_m += ppw->pvecback[pba->index_bg_rho_gdm];
}
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) {
if (ppt->gauge == newtonian)
rho_plus_p_theta_m += (1.+w_gdm)*ppw->pvecback[pba->index_bg_rho_gdm]*y[ppw->pv->index_pt_theta_gdm]; // contribution to [(rho+p)theta]_matter
rho_plus_p_m += (1.+w_gdm)*ppw->pvecback[pba->index_bg_rho_gdm];
}
}
/* END GDM_CLASS */
/* fluid contribution */
if (pba->has_fld == _TRUE_) {
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
w_prime_fld = dw_over_da_fld * a_prime_over_a * a;
if (pba->use_ppf == _FALSE_) {
ppw->delta_rho_fld = ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld];
ppw->rho_plus_p_theta_fld = (1.+w_fld)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld];
ca2_fld = w_fld - w_prime_fld / 3. / (1.+w_fld) / a_prime_over_a;
/** We must gauge transform the pressure perturbation from the fluid rest-frame to the gauge we are working in */
ppw->delta_p_fld = pba->cs2_fld * ppw->delta_rho_fld + (pba->cs2_fld-ca2_fld)*(3*a_prime_over_a*ppw->rho_plus_p_theta_fld/k/k);
}
else {
s2sq = ppw->s_l[2]*ppw->s_l[2];
c_gamma_k_H_square = pow(pba->c_gamma_over_c_fld*k/a_prime_over_a,2)*pba->cs2_fld;
/** The equation is too stiff for Runge-Kutta when c_gamma_k_H_square is large.
Use the asymptotic solution Gamma=Gamma'=0 in that case.
*/
if (c_gamma_k_H_square > ppr->c_gamma_k_H_square_max)
Gamma_fld = 0.;
else
Gamma_fld = y[ppw->pv->index_pt_Gamma_fld];
if (ppt->gauge == synchronous){
alpha = (y[ppw->pv->index_pt_eta]+1.5*a2/k2/s2sq*(ppw->delta_rho+3*a_prime_over_a/k2*ppw->rho_plus_p_theta)-Gamma_fld)/a_prime_over_a;
alpha_prime = -2. * a_prime_over_a * alpha + y[ppw->pv->index_pt_eta] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear;
metric_euler = 0.;
}
else{
alpha = 0.;
alpha_prime = 0.;
metric_euler = k2*y[ppw->pv->index_pt_phi] - 4.5*a2*ppw->rho_plus_p_shear;
}
ppw->S_fld = ppw->pvecback[pba->index_bg_rho_fld]*(1.+w_fld)*1.5*a2/k2/a_prime_over_a*
(ppw->rho_plus_p_theta/ppw->rho_plus_p_tot+k2*alpha);
// note that the last terms in the ratio do not include fld, that's correct, it's the whole point of the PPF scheme
/** We must now check the stiffenss criterion again and set Gamma_prime_fld accordingly. */
if (c_gamma_k_H_square > ppr->c_gamma_k_H_square_max){
ppw->Gamma_prime_fld = 0.;
}
else{
ppw->Gamma_prime_fld = a_prime_over_a*(ppw->S_fld/(1.+c_gamma_k_H_square) - (1.+c_gamma_k_H_square)*Gamma_fld);
}
Gamma_prime_plus_a_prime_over_a_Gamma = ppw->Gamma_prime_fld+a_prime_over_a*Gamma_fld;
// delta and theta in both gauges:
ppw->rho_plus_p_theta_fld = ppw->pvecback[pba->index_bg_rho_fld]*(1.+w_fld)*ppw->rho_plus_p_theta/ppw->rho_plus_p_tot-
k2*2./3.*a_prime_over_a/a2/(1+4.5*a2/k2/s2sq*ppw->rho_plus_p_tot)*
(ppw->S_fld-Gamma_prime_plus_a_prime_over_a_Gamma/a_prime_over_a);
ppw->delta_rho_fld = -2./3.*k2*s2sq/a2*Gamma_fld-3*a_prime_over_a/k2*ppw->rho_plus_p_theta_fld;
/** Now construct the pressure perturbation, see 1903.xxxxx. */
/** Construct energy density and pressure for DE (_fld) and the rest (_t).
Also compute derivatives. */
rho_fld = ppw->pvecback[pba->index_bg_rho_fld];
p_fld = w_fld*rho_fld;
rho_fld_prime = -3*a_prime_over_a*(rho_fld+p_fld);
p_fld_prime = w_prime_fld*rho_fld-3*a_prime_over_a*(1+w_fld)*p_fld;
rho_t = ppw->pvecback[pba->index_bg_rho_tot] - rho_fld;
p_t = ppw->pvecback[pba->index_bg_p_tot] - p_fld;
rho_t_prime = -3*a_prime_over_a*(rho_t+p_t);
p_t_prime = ppw->pvecback[pba->index_bg_p_tot_prime]-p_fld_prime;
/** Compute background quantities X,Y,Z and their derivatives. */
X = c_gamma_k_H_square;
X_prime = -2*X*(a_prime_over_a + ppw->pvecback[pba->index_bg_H_prime]/ppw->pvecback[pba->index_bg_H]);
Y = 4.5*a2/k2/s2sq*(rho_t+p_t);
Y_prime = Y*(2.*a_prime_over_a+(rho_t_prime+p_t_prime)/(rho_t+p_t));
Z = 2./3.*k2*ppw->pvecback[pba->index_bg_H]/a;
Z_prime = Z*(ppw->pvecback[pba->index_bg_H_prime]/ppw->pvecback[pba->index_bg_H] - a_prime_over_a);
/** Construct theta_t and its derivative from the Euler equation */
theta_t = ppw->rho_plus_p_theta/rho_plus_p_tot;
theta_t_prime = -a_prime_over_a*theta_t-(p_t_prime*theta_t-k2*ppw->delta_p +k2*ppw->rho_plus_p_shear)/rho_plus_p_tot+metric_euler;
S = ppw->S_fld;
S_prime = -Z_prime/Z*S+1./Z*(rho_fld_prime+p_fld_prime)*(theta_t+k2*alpha)+1./Z*(rho_fld+p_fld)*(theta_t_prime+k2*alpha_prime);
/** Analytic derivative of the equation for ppw->rho_plus_p_theta_fld above. */
rho_plus_p_theta_fld_prime = Z_prime*(S-1./(1.+Y)*(S/(1.+1./X)+Gamma_fld*X)) +
Z*(S_prime + Y_prime/(1.+Y*Y+2*Y)*(S/(1.+1./X)+Gamma_fld*X)-
1./(1.+Y)*(S_prime/(1.+1./X)+S*X_prime/(1.+X*X+2*X)+ppw->Gamma_prime_fld*X+Gamma_fld*X_prime))-
k2*alpha_prime*(rho_fld+p_fld)-k2*alpha*(rho_fld_prime+p_fld_prime);
/** We can finally compute the pressure perturbation using the Euler equation for theta_fld */
ppw->delta_p_fld = (rho_plus_p_theta_fld_prime+4*a_prime_over_a* ppw->rho_plus_p_theta_fld - (rho_fld+p_fld)*metric_euler)/k2;
}
ppw->delta_rho += ppw->delta_rho_fld;
ppw->rho_plus_p_theta += ppw->rho_plus_p_theta_fld;
ppw->delta_p += ppw->delta_p_fld;
ppw->rho_plus_p_tot += (1.+w_fld)*ppw->pvecback[pba->index_bg_rho_fld];
}
/* don't add more species here, add them before the fluid contribution: because of the PPF scheme, the fluid must be the last one! */
/* store delta_m in the current gauge. In perturb_einstein, this
will be transformed later on into the gauge-independent variable D
= delta_m - 2H'/H \theta_m/k^2 . */
if (ppt->has_source_delta_m == _TRUE_)
ppw->delta_m = delta_rho_m/rho_m;
/* store theta_m in the current gauge. In perturb_einstein, this
will be transformed later on into the gauge-independent variable
Theta . Note that computing theta_m is necessary also if we want
the delta_m source only, because the gauge-invariant delta_m
involves theta_m in the current gauge. */
if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_))
ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m;
/* could include Lambda contribution to rho_tot (not done to match CMBFAST/CAMB definition) */
}
/** - for vector modes */
if (_vectors_) {
ppw->vector_source_pi = 0.;
ppw->vector_source_v = 0.;
/** - --> photon contribution to vector sources: */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
* (-1./4.*_SQRT2_)
* (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]);
ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g]
* (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k))
* (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]);
}
}
/** - --> baryons */
}
/** - for tensor modes */
if (_tensors_) {
ppw->gw_source = 0.0;
/** - --> photon contribution to gravitational wave source: */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */
ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]*
(1./15.*y[ppw->pv->index_pt_delta_g]+
4./21.*y[ppw->pv->index_pt_shear_g]+
1./35.*y[ppw->pv->index_pt_l3_g+1]));
}
}
/** - --> ur contribution to gravitational wave source: */
if (ppt->evolve_tensor_ur == _TRUE_){
rho_relativistic = 0.;
if (ppt->tensor_method == tm_exact)
rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
if (ppt->tensor_method == tm_massless_approximation) {
if (pba->has_ur == _TRUE_)
rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur];
if (pba->has_ncdm == _TRUE_) {
for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) {
/* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */
rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm];
}
}
}
ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic*
(1./15.*y[ppw->pv->index_pt_delta_ur]+
4./21.*y[ppw->pv->index_pt_shear_ur]+
1./35.*y[ppw->pv->index_pt_l3_ur+1]));
}
/** - --> ncdm contribution to gravitational wave source: */
if (ppt->evolve_tensor_ncdm == _TRUE_){
idx = ppw->pv->index_pt_psi0_ncdm1;
// We must integrate to find perturbations:
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
gwncdm = 0.;
factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);
for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {
q = pba->q_ncdm[n_ncdm][index_q];
q2 = q*q;
epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);
gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]);
//Jump to next momentum bin:
idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
}
gwncdm *= -_SQRT6_*4*a2*factor;
ppw->gw_source += gwncdm;
}
}
}
return _SUCCESS_;
}
/**
* Compute the source functions (three terms for temperature, one for
* E or B modes, etc.)
*
* This is one of the few functions in the code which is passed to
* the generic_integrator() routine. Since generic_integrator()
* should work with functions passed from various modules, the format
* of the arguments is a bit special:
*
* - fixed parameters and workspaces are passed through a generic
* pointer. generic_integrator() doesn't know the content of this
* pointer.
*
* - the error management is a bit special: errors are not written as
* usual to pth->error_message, but to a generic error_message passed
* in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of time derivative of perturbations
* @param index_tau Input: index in the array tau_sampling
* @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_sources, in output, source terms
* @param error_message Output: error message
* @return the error status
*/
int perturb_sources(
                    double tau,
                    double * y,
                    double * dy,
                    int index_tau,
                    void * parameters_and_workspace,
                    ErrorMsg error_message
                    ) {
  /** Summary: */

  /** - define local variables */

  double P;
  int index_tp;

  struct perturb_parameters_and_workspace * pppaw;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  int index_md;
  int index_ic;
  int index_k;
  double k;
  double z;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;

  double delta_g, delta_rho_scf, rho_plus_p_theta_scf;
  double a_prime_over_a=0.;  /* (a'/a) */
  double a_prime_over_a_prime=0.;  /* (a'/a)' */
  double w_fld,dw_over_da_fld,integral_fld;
  int switch_isw = 1;

  double a_rel, a2_rel, f_dr;

  double rho_plus_p_tot, H_T_Nb_prime=0., rho_tot;
  double theta_over_k2,theta_shift;

  /** - rename structure fields (just to avoid heavy notations) */

  pppaw = parameters_and_workspace;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  index_md = pppaw->index_md;
  index_ic = pppaw->index_ic;
  index_k = pppaw->index_k;
  k = pppaw->k;
  ppw = pppaw->ppw;

  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;

  /** - get background/thermo quantities in this point */

  class_call(background_at_tau(pba,
                               tau,
                               pba->normal_info,
                               pba->inter_closeby,
                               &(ppw->last_index_back),
                               pvecback),
             pba->error_message,
             error_message);

  z = pba->a_today/pvecback[pba->index_bg_a]-1.;

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 z,  /* redshift z=1/a-1 */
                                 pth->inter_closeby,
                                 &(ppw->last_index_thermo),
                                 pvecback,
                                 pvecthermo),
             pth->error_message,
             error_message);

  a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today;
  a2_rel = a_rel * a_rel;

  a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */
  a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */

  /** - for scalars */
  if (_scalars_) {

    /** - --> compute metric perturbations */

    class_call(perturb_einstein(ppr,
                                pba,
                                pth,
                                ppt,
                                index_md,
                                k,
                                tau,
                                y,
                                ppw),
               ppt->error_message,
               error_message);

    /** - --> compute quantities depending on approximation schemes */

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
      delta_g = ppw->rsa_delta_g;
      P = 0.;
    }
    else {
      delta_g = y[ppw->pv->index_pt_delta_g];

      if (ppw->approx[ppw->index_ap_tca] == (int)tca_on)
        P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.; /* (2.5+0.5+2)shear_g/8 */
      else
        P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.;
    }

    /** - --> for each type, compute source terms */

    /* scalar temperature */
    if (ppt->has_source_t == _TRUE_) {

      /* check whether integrated Sachs-Wolf term should be included:
         the eisw/lisw switches allow to turn off the early or late ISW
         contribution separately, split at redshift eisw_lisw_split_z */
      if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){
        switch_isw = 0;
      }
      if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) {
        switch_isw=0;
      }

      /* newtonian gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) = pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime] + pvecthermo[pth->index_th_g] * delta_g / 4.;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi] + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_g] * P;
        }
      */

      /* newtonian gauge: slightly more complicated form, but more efficient numerically */
      if (ppt->gauge == newtonian) {
        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime])
          + ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b]
                                    + pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]);

        _set_source_(ppt->index_tp_t1) = switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]);

        _set_source_(ppt->index_tp_t2) = ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }

      /* synchronous gauge: simplest form, not efficient numerically */
      /*
        if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) = - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6. + pvecthermo[pth->index_th_g] / 4. * delta_g;
        _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k;
        _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha] + pvecthermo[pth->index_th_g] * P;
        }
      */

      /* synchronous gauge: slightly more complicated form, but more efficient numerically */
      if (ppt->gauge == synchronous) {
        _set_source_(ppt->index_tp_t0) =
          ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime])
          + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta]
                                                         - pvecmetric[ppw->index_mt_alpha_prime]
                                                         - 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha])
                          + pvecthermo[pth->index_th_exp_m_kappa] * 2. * (pvecmetric[ppw->index_mt_eta_prime]
                                                                          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
                                                                          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]))
          + ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime])
                               +pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha]));

        _set_source_(ppt->index_tp_t1) =
          switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime]
                                                                    + 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]
                                                                    - y[ppw->pv->index_pt_eta]);

        _set_source_(ppt->index_tp_t2) =
          ppt->switch_pol * pvecthermo[pth->index_th_g] * P;
      }
    }

    /* scalar polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* all gauges. Note that the correct formula for the E source
         should have a minus sign, as shown in Hu & White. We put a
         plus sign to comply with the 'historical convention'
         established in CMBFAST and CAMB. */

      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }

    /* now, non-CMB sources */

    /* H_T_prime in N-body gauge. (H_T=3zeta where zeta is the comoving curvature perturbation.).
       See equation A.5 in 1811.00904.*/
    if (ppt->has_source_H_T_Nb_prime == _TRUE_) {
      rho_plus_p_tot = (pvecback[pba->index_bg_rho_tot]+pvecback[pba->index_bg_p_tot]);
      H_T_Nb_prime = 3*a_prime_over_a/rho_plus_p_tot*(-ppw->delta_p+
                                                      pvecback[pba->index_bg_p_tot_prime]*ppw->rho_plus_p_theta/rho_plus_p_tot/k/k+
                                                      ppw->rho_plus_p_shear);
      _set_source_(ppt->index_tp_H_T_Nb_prime) = H_T_Nb_prime;
      /** gamma in Nbody gauge, see Eq. A.2 in 1811.00904. */
      if (ppt->has_source_k2gamma_Nb == _TRUE_){
        _set_source_(ppt->index_tp_k2gamma_Nb) = -a_prime_over_a*H_T_Nb_prime+9./2.*a2_rel*ppw->rho_plus_p_shear;
      }
    }

    if (ppt->has_source_k2gamma_Nb == _TRUE_) {
      class_stop(ppt->error_message,"We need to compute the derivative of H_T_Nb_prime numerically. Written by T. Tram but not yet propagated here. See devel branch prior to merging with hmcode branch");
    }

    /* Bardeen potential -PHI_H = phi in Newtonian gauge */
    if (ppt->has_source_phi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha];
    }

    /* its derivative phi' */
    if (ppt->has_source_phi_prime == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta]
          - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha]
          - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */
    if (ppt->has_source_phi_plus_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_phi_plus_psi) =
          y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_phi_plus_psi) =
          y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* Bardeen potential PHI_A = psi in newtonian gauge */
    if (ppt->has_source_psi == _TRUE_) {

      if (ppt->gauge == newtonian)
        _set_source_(ppt->index_tp_psi) =
          pvecmetric[ppw->index_mt_psi];

      if (ppt->gauge == synchronous)
        _set_source_(ppt->index_tp_psi) =
          a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime];
    }

    /* the metric potentials h and eta in synchronous gauge */
    if (ppt->gauge == synchronous) {
      /* cdm is always on in synchronous gauge, see error message above that checks gauge and has_cdm */
      if (ppt->has_source_h == _TRUE_)
        _set_source_(ppt->index_tp_h) = - 2 * y[ppw->pv->index_pt_delta_cdm];
      if (ppt->has_source_h_prime == _TRUE_)
        _set_source_(ppt->index_tp_h_prime) = pvecmetric[ppw->index_mt_h_prime];
      if (ppt->has_source_eta == _TRUE_)
        _set_source_(ppt->index_tp_eta) = y[ppw->pv->index_pt_eta];
      if (ppt->has_source_eta_prime == _TRUE_)
        _set_source_(ppt->index_tp_eta_prime) = dy[ppw->pv->index_pt_eta];
    }

    /* total matter overdensity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_delta_m == _TRUE_) {
      _set_source_(ppt->index_tp_delta_m) = ppw->delta_m;
    }

    /* cdm and baryon over density */
    if (ppt->has_source_delta_cb == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cb) = ppw->delta_cb;
    }

    /* compute the corrections that have to be applied to each (delta_i, theta_i) in N-body gauge */
    if (ppt->has_Nbody_gauge_transfers == _TRUE_){
      /* FIX: theta_over_k2 must be theta_tot/k^2 (the trailing /k/k was
         missing). It is used below in the N-body gauge transformation
         delta -> delta + 3(1+w)(a'/a)*theta_tot/k^2 of each species
         (see arXiv:1811.00904); without the division every correction
         would be wrong by a factor k^2. */
      theta_over_k2 = ppw->rho_plus_p_theta/(pvecback[pba->index_bg_rho_tot]+pvecback[pba->index_bg_p_tot])/k/k;
      theta_shift = H_T_Nb_prime;
      if (ppt->gauge == synchronous) theta_shift += pvecmetric[ppw->index_mt_alpha]*k*k;
    }
    else{
      theta_over_k2 = 0.;
      theta_shift = 0.;
    }

    /* delta_tot */
    if (ppt->has_source_delta_tot == _TRUE_) {

      /** We follow the (debatable) CMBFAST/CAMB convention of not including rho_lambda in rho_tot */
      if (pba->has_lambda == _TRUE_){
        rho_tot = pvecback[pba->index_bg_rho_tot] - pvecback[pba->index_bg_rho_lambda];
      }
      else{
        rho_tot = pvecback[pba->index_bg_rho_tot];
      }

      _set_source_(ppt->index_tp_delta_tot) = ppw->delta_rho/rho_tot
        + 3*a_prime_over_a*(1+pvecback[pba->index_bg_p_tot]/pvecback[pba->index_bg_rho_tot])*theta_over_k2;
    }

    /* delta_g */
    if (ppt->has_source_delta_g == _TRUE_) {
      _set_source_(ppt->index_tp_delta_g) = delta_g
        + 4.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_baryon */
    if (ppt->has_source_delta_b == _TRUE_) {
      _set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b]
        + 3.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_cdm */
    if (ppt->has_source_delta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm]
        + 3.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* GDM_CLASS: delta_gdm */
    if (ppt->has_source_delta_gdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_gdm) = y[ppw->pv->index_pt_delta_gdm]
        + 3.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_dcdm */
    if (ppt->has_source_delta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm]
        + (3.*a_prime_over_a+a_rel*pba->Gamma_dcdm)*theta_over_k2; // N-body gauge correction;
    }

    /* delta_fld */
    if (ppt->has_source_delta_fld == _TRUE_) {
      _set_source_(ppt->index_tp_delta_fld) = ppw->delta_rho_fld/pvecback[pba->index_bg_rho_fld]
        + 3.*a_prime_over_a*(1.+pvecback[pba->index_bg_w_fld])*theta_over_k2; // N-body gauge correction
    }

    /* delta_scf */
    if (ppt->has_source_delta_scf == _TRUE_) {
      if (ppt->gauge == synchronous){
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf])
          + 3.*a_prime_over_a*(1.+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf])*theta_over_k2; // N-body gauge correction
      }
      else{
        delta_rho_scf = 1./3.*
          (1./a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2_rel*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi])
          + 3.*a_prime_over_a*(1.+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf])*theta_over_k2; // N-body gauge correction
      }
      _set_source_(ppt->index_tp_delta_scf) = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
    }

    /* delta_dr */
    if (ppt->has_source_delta_dr == _TRUE_) {
      /* f_dr = rho_dr*a^4/H0^2 converts the stored moment F0_dr into delta_dr */
      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      _set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr
        + 4.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_ur */
    if (ppt->has_source_delta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur]
          + 4.*a_prime_over_a*theta_over_k2; // N-body gauge correction
      else
        _set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur
          + 4.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_idr */
    if (ppt->has_source_delta_idr == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa_idr]==(int)rsa_idr_off)
        _set_source_(ppt->index_tp_delta_idr) = y[ppw->pv->index_pt_delta_idr]
          + 4.*a_prime_over_a*theta_over_k2; // N-body gauge correction
      else
        _set_source_(ppt->index_tp_delta_idr) = ppw->rsa_delta_idr
          + 4.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_idm_dr */
    if (ppt->has_source_delta_idm_dr == _TRUE_) {
      _set_source_(ppt->index_tp_delta_idm_dr) = y[ppw->pv->index_pt_delta_idm_dr]
        + 3.*a_prime_over_a*theta_over_k2; // N-body gauge correction
    }

    /* delta_ncdm1 */
    if (ppt->has_source_delta_ncdm == _TRUE_) {
      for (index_tp = ppt->index_tp_delta_ncdm1; index_tp < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_tp++) {
        _set_source_(index_tp) = ppw->delta_ncdm[index_tp - ppt->index_tp_delta_ncdm1]
          + 3.*a_prime_over_a*(1+pvecback[index_tp - ppt->index_tp_delta_ncdm1 + pba->index_bg_p_ncdm1]
                               /pvecback[index_tp - ppt->index_tp_delta_ncdm1 + pba->index_bg_rho_ncdm1])*theta_over_k2; // N-body gauge correction
      }
    }

    /* total velocity */
    /* FIX: this source was assigned twice (a second, inconsistent copy
       dividing by ppw->rho_plus_p_tot appeared further below and
       silently overwrote this one). The duplicate has been removed;
       the background totals from pvecback are the correct denominator. */
    if (ppt->has_source_theta_tot == _TRUE_) {
      _set_source_(ppt->index_tp_theta_tot) = ppw->rho_plus_p_theta/(pvecback[pba->index_bg_rho_tot]+pvecback[pba->index_bg_p_tot])
        + theta_shift; // N-body gauge correction
    }

    /* total matter velocity (gauge-invariant, defined as in arXiv:1307.1459) */
    if (ppt->has_source_theta_m == _TRUE_) {
      _set_source_(ppt->index_tp_theta_m) = ppw->theta_m;
    }

    /* cdm and baryon velocity */
    if (ppt->has_source_theta_cb == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cb) = ppw->theta_cb;
    }

    /* theta_g */
    if (ppt->has_source_theta_g == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g]
          + theta_shift; // N-body gauge correction
      else
        _set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g
          + theta_shift; // N-body gauge correction
    }

    /* theta_baryon */
    if (ppt->has_source_theta_b == _TRUE_) {
      _set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b]
        + theta_shift; // N-body gauge correction
    }

    /* theta_cdm */
    if (ppt->has_source_theta_cdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm]
        + theta_shift; // N-body gauge correction
    }

    /* GDM_CLASS: theta_gdm */
    if (ppt->has_source_theta_gdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_gdm) = y[ppw->pv->index_pt_theta_gdm]
        + theta_shift; // N-body gauge correction
    }

    /* theta_idm_dr */
    if (ppt->has_source_theta_idm_dr == _TRUE_) {
      _set_source_(ppt->index_tp_theta_idm_dr) = y[ppw->pv->index_pt_theta_idm_dr]
        + theta_shift; // N-body gauge correction
    }

    /* theta_dcdm */
    if (ppt->has_source_theta_dcdm == _TRUE_) {
      _set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm]
        + theta_shift; // N-body gauge correction
    }

    /* theta_fld */
    if (ppt->has_source_theta_fld == _TRUE_) {

      class_call(background_w_fld(pba,a_rel*pba->a_today,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);

      _set_source_(ppt->index_tp_theta_fld) = ppw->rho_plus_p_theta_fld/(1.+w_fld)/pvecback[pba->index_bg_rho_fld]
        + theta_shift; // N-body gauge correction
    }

    /* theta_scf */
    if (ppt->has_source_theta_scf == _TRUE_) {

      rho_plus_p_theta_scf = 1./3.*
        k*k/a2_rel*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];

      _set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf])
        + theta_shift; // N-body gauge correction
    }

    /* theta_dr */
    if (ppt->has_source_theta_dr == _TRUE_) {

      f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr];

      _set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr
        + theta_shift; // N-body gauge correction
    }

    /* theta_ur */
    if (ppt->has_source_theta_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off)
        _set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur]
          + theta_shift; // N-body gauge correction
      else
        _set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur
          + theta_shift; // N-body gauge correction
    }

    /* theta_idr */
    if (ppt->has_source_theta_idr == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa_idr]==(int)rsa_idr_off)
        _set_source_(ppt->index_tp_theta_idr) = y[ppw->pv->index_pt_theta_idr]
          + theta_shift; // N-body gauge correction
      else
        _set_source_(ppt->index_tp_theta_idr) = ppw->rsa_theta_idr
          + theta_shift; // N-body gauge correction
    }

    /* theta_ncdm1 */
    if (ppt->has_source_theta_ncdm == _TRUE_) {
      for (index_tp = ppt->index_tp_theta_ncdm1; index_tp < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_tp++) {
        _set_source_(index_tp) = ppw->theta_ncdm[index_tp - ppt->index_tp_theta_ncdm1]
          + theta_shift; // N-body gauge correction
      }
    }
  }

  /** - for tensors */
  if (_tensors_) {

    /** - --> compute quantities depending on approximation schemes */

    if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
      if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
        P = -(1./10.*y[ppw->pv->index_pt_delta_g]
              +2./7.*y[ppw->pv->index_pt_shear_g]
              +3./70.*y[ppw->pv->index_pt_delta_g+4]
              -3./5.*y[ppw->pv->index_pt_pol0_g]
              +6./7.*y[ppw->pv->index_pt_pol2_g]
              -3./70.*y[ppw->pv->index_pt_pol0_g+4])
          /sqrt(6.);
      }
      else {
        P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC
      }
    }
    else {
      P = 0.;
    }

    /* tensor temperature */
    if (ppt->has_source_t == _TRUE_) {
      _set_source_(ppt->index_tp_t2) = - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa] + pvecthermo[pth->index_th_g] * P;
    }

    /* tensor polarization */
    if (ppt->has_source_p == _TRUE_) {

      /* Note that the correct formula for the polarization source
         should have a minus sign, as shown in Hu & White. We put a
         plus sign to comply with the 'historical convention'
         established in CMBFAST and CAMB. */

      _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P;
    }
  }

  return _SUCCESS_;
}
/**
* When testing the code or a cosmological model, it can be useful to
* output perturbations at each step of integration (and not just the
* delta's at each source sampling point, which is achieved simply by
* asking for matter transfer functions). Then this function can be
* passed to the generic_evolver routine.
*
* By default, instead of passing this function to generic_evolver,
* one passes a null pointer. Then this function is just not used.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Input: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input: fixed parameters (e.g. indices)
* @param error_message Output: error message
*
*/
int perturb_print_variables(double tau,
                            double * y,
                            double * dy,
                            void * parameters_and_workspace,
                            ErrorMsg error_message
                            ) {

  /* Dump all perturbation variables at the current integration step into
     the per-k output arrays of the perturbs structure.  Passed (optionally)
     to the generic evolver; returns _SUCCESS_ or an error code via the
     class_call/class_alloc macros. */

  struct perturb_parameters_and_workspace * pppaw;

  /** Summary: */

  /** - define local variables */
  double k;
  int index_md;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;

  /* local short-cuts for the species perturbations; species that may be
     absent are pre-initialized to 0 so they can be stored unconditionally */
  double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g;
  double delta_b,theta_b;
  double delta_cdm=0.,theta_cdm=0.;
  double delta_idm_dr=0.,theta_idm_dr=0.;
  double delta_dcdm=0.,theta_dcdm=0.;
  double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0;
  double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.;
  double delta_idr=0., theta_idr=0., shear_idr=0.;
  double delta_rho_scf=0., rho_plus_p_theta_scf=0.;
  double delta_scf=0., theta_scf=0.;
  /** - ncdm sector begins */
  int n_ncdm;
  /* per-species arrays, heap-allocated below only if ncdm is present */
  double *delta_ncdm=NULL, *theta_ncdm=NULL, *shear_ncdm=NULL, *delta_p_over_delta_rho_ncdm=NULL;
  double rho_ncdm_bg, p_ncdm_bg, pseudo_p_ncdm, w_ncdm;
  double rho_delta_ncdm = 0.0;
  double rho_plus_p_theta_ncdm = 0.0;
  double rho_plus_p_shear_ncdm = 0.0;
  double delta_p_ncdm = 0.0;
  double factor = 0.0;
  double q,q2,epsilon;
  /** - ncdm sector ends */
  double phi=0.,psi=0.,alpha=0.;
  double delta_temp=0., delta_chi=0.;

  /* GDM_CLASS */
  double delta_gdm=0.,theta_gdm=0.,shear_gdm=0.,pinad_gdm=0.;
  double w_gdm=0.,cs2_gdm=0.,ca2_gdm=0.;
  /* extra source decompositions (temperature, ISW, Doppler terms) */
  double temperC=0, ISW1C=0, ISW2C=0, dopplC=0, doppldotC=0;
  double a_prime_over_a=0.,a_prime_over_a_prime=0.;
  /* END GDM_CLASS */

  double a,a2,H;
  int idx,index_q, storeidx;
  double *dataptr;

  /** - rename structure fields (just to avoid heavy notations) */
  pppaw = parameters_and_workspace;
  k = pppaw->k;
  index_md = pppaw->index_md;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;

  /** - update background/thermo quantities in this point */
  class_call(background_at_tau(pba,
                               tau,
                               pba->normal_info,
                               pba->inter_closeby,
                               &(ppw->last_index_back),
                               pvecback),
             pba->error_message,
             error_message);

  class_call(thermodynamics_at_z(pba,
                                 pth,
                                 1./pvecback[pba->index_bg_a]-1., /* redshift z = 1/a - 1 */
                                 pth->inter_closeby,
                                 &(ppw->last_index_thermo),
                                 pvecback,
                                 pvecthermo),
             pth->error_message,
             error_message);

  /** - update metric perturbations in this point */
  class_call(perturb_einstein(ppr,
                              pba,
                              pth,
                              ppt,
                              index_md,
                              k,
                              tau,
                              y,
                              ppw),
             ppt->error_message,
             error_message);

  a = pvecback[pba->index_bg_a];
  a2 = a*a;
  H = pvecback[pba->index_bg_H];

  /* GDM_CLASS: needed quantities */
  a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */
  a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */
  /* END GDM_CLASS */

  /* per-species ncdm scratch arrays; freed at the end of this function */
  if (pba->has_ncdm == _TRUE_){
    class_alloc(delta_ncdm, sizeof(double)*pba->N_ncdm,error_message);
    class_alloc(theta_ncdm, sizeof(double)*pba->N_ncdm,error_message);
    class_alloc(shear_ncdm, sizeof(double)*pba->N_ncdm,error_message);
    class_alloc(delta_p_over_delta_rho_ncdm, sizeof(double)*pba->N_ncdm,error_message);
  }

  /** - calculate perturbed recombination */
  if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){
    delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
    delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
  }

  /** - for scalar modes */
  if (_scalars_) {

    /* photon density/velocity: take the evolved values when the radiation
       streaming approximation (rsa) is off, otherwise the rsa estimates */
    if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
      delta_g = y[ppw->pv->index_pt_delta_g];
      theta_g = y[ppw->pv->index_pt_theta_g];
    }
    else {
      delta_g = ppw->rsa_delta_g;
      theta_g = ppw->rsa_theta_g;
    }

    /* photon shear and polarization: under tight coupling (tca) they are
       slaved to tca_shear_g; otherwise read from the evolved vector */
    if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
      if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) {
        shear_g = ppw->tca_shear_g;
        //l3_g = 6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
        pol0_g = 2.5*ppw->tca_shear_g;
        pol1_g = 7./12.*6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
        pol2_g = 0.5*ppw->tca_shear_g;
        //pol3_g = 0.25*6./7.*k/pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g;
      }
      else {
        shear_g = y[ppw->pv->index_pt_shear_g];
        //l3_g = y[ppw->pv->index_pt_l3_g];
        pol0_g = y[ppw->pv->index_pt_pol0_g];
        pol1_g = y[ppw->pv->index_pt_pol1_g];
        pol2_g = y[ppw->pv->index_pt_pol2_g];
        //pol3_g = y[ppw->pv->index_pt_pol3_g];
      }
    }
    else {
      shear_g = 0;
      //l3_g = 0;
      pol0_g = 0;
      pol1_g = 0;
      pol2_g = 0;
      //pol3_g = 0.;
    }

    /* ultra-relativistic (massless neutrino) species */
    if (pba->has_ur == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
        delta_ur = y[ppw->pv->index_pt_delta_ur];
        theta_ur = y[ppw->pv->index_pt_theta_ur];
        shear_ur = y[ppw->pv->index_pt_shear_ur];
      }
      else {
        delta_ur = ppw->rsa_delta_ur;
        theta_ur = ppw->rsa_theta_ur;
        shear_ur = 0.;
      }
    }

    delta_b = y[ppw->pv->index_pt_delta_b];
    theta_b = y[ppw->pv->index_pt_theta_b];

    /* interacting dark radiation */
    if (pba->has_idr == _TRUE_) {
      if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off) {
        delta_idr = y[ppw->pv->index_pt_delta_idr];
        theta_idr = y[ppw->pv->index_pt_theta_idr];
        if(ppt->idr_nature == idr_free_streaming){
          if((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_on)){
            shear_idr = ppw->tca_shear_idm_dr;
          }
          else{
            shear_idr = y[ppw->pv->index_pt_shear_idr];
          }
        }
      }
      else{
        delta_idr = ppw->rsa_delta_idr;
        theta_idr = ppw->rsa_theta_idr;
        shear_idr = 0.;
      }
    }

    /* interacting dark matter */
    if (pba->has_idm_dr == _TRUE_) {
      delta_idm_dr = y[ppw->pv->index_pt_delta_idm_dr];
      theta_idm_dr = y[ppw->pv->index_pt_theta_idm_dr];
    }

    if (pba->has_cdm == _TRUE_) {
      delta_cdm = y[ppw->pv->index_pt_delta_cdm];
      /* in synchronous gauge the cdm velocity is gauged to zero */
      if (ppt->gauge == synchronous) {
        theta_cdm = 0.;
      }
      else {
        theta_cdm = y[ppw->pv->index_pt_theta_cdm];
      }
    }

    /* gravitational potentials */
    if (ppt->gauge == synchronous) {
      /* reconstruct the newtonian-gauge potentials (psi, phi) from the
         synchronous metric variables via alpha = (h'+6eta')/2k^2 */
      alpha = pvecmetric[ppw->index_mt_alpha];
      psi = pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime];
      phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
    }
    else if (ppt->gauge == newtonian){
      psi = pvecmetric[ppw->index_mt_psi];
      phi = y[ppw->pv->index_pt_phi];
    }
    else{
      psi = 0.0;
      phi = 0.0;
    }

    /* GDM_CLASS */
    /* New sources */
    if (ppt->gauge == synchronous) {
      temperC = delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime];
      ISW1C = y[ppw->pv->index_pt_eta] - pvecmetric[ppw->index_mt_alpha_prime] - 2.*a_prime_over_a*alpha ;
      ISW2C = pvecmetric[ppw->index_mt_eta_prime] - a_prime_over_a_prime*alpha - a_prime_over_a*pvecmetric[ppw->index_mt_alpha_prime];
      dopplC = dy[ppw->pv->index_pt_theta_b]/k/k+ pvecmetric[ppw->index_mt_alpha_prime];
      doppldotC = y[ppw->pv->index_pt_theta_b]/k/k + alpha;
    }
    /* Newtonian sources to be filled in*/
    else if (ppt->gauge == newtonian) {
      temperC = 0.;
      ISW1C = 0.;
      ISW2C= pvecmetric[ppw->index_mt_phi_prime] ;
      dopplC=0.;
      doppldotC=0.;
    }
    else {
      temperC = 0.;
      ISW1C = 0.;
      ISW2C=0.;
      dopplC=0.;
      doppldotC=0.;
    }

    /* added gdm fluid */
    if (pba->has_gdm == _TRUE_) {
      w_gdm = ppw->pvecback[pba->index_bg_w_gdm];
      ca2_gdm = ppw->pvecback[pba->index_bg_ca2_gdm];
      cs2_gdm = cs2_gdm_of_a_and_k(pba,a,k,ppw);
      delta_gdm = y[ppw->pv->index_pt_delta_gdm];
      theta_gdm = y[ppw->pv->index_pt_theta_gdm];
      if (ppt->dynamic_shear_gdm == _TRUE_) {
        shear_gdm = y[ppw->pv->index_pt_shear_gdm];
      }
      else {
        shear_gdm = ppw->pvecmetric[ppw->index_mt_shear_gdm];
      }
      /* non-adiabatic pressure perturbation built from (cs2 - ca2) */
      pinad_gdm = (cs2_gdm - ca2_gdm)*(delta_gdm + theta_gdm/k/k*3.0*pvecback[pba->index_bg_a]*pvecback[pba->index_bg_H]*(1.+ w_gdm)) ;
    }
    /* END GDM_CLASS */

    if (pba->has_ncdm == _TRUE_) {
      /** - --> Get delta, deltaP/rho, theta, shear and store in array */
      idx = ppw->pv->index_pt_psi0_ncdm1;
      if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){
        // The perturbations are evolved integrated:
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
          p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm];
          pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm];
          w_ncdm = p_ncdm_bg/rho_ncdm_bg;
          delta_ncdm[n_ncdm] = y[idx];
          theta_ncdm[n_ncdm] = y[idx+1];
          shear_ncdm[n_ncdm] = y[idx+2];
          //This is the adiabatic sound speed:
          delta_p_over_delta_rho_ncdm[n_ncdm] = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg));
          idx += ppw->pv->l_max_ncdm[n_ncdm]+1;
        }
      }
      else{
        // We must integrate to find perturbations:
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          rho_delta_ncdm = 0.0;
          rho_plus_p_theta_ncdm = 0.0;
          rho_plus_p_shear_ncdm = 0.0;
          delta_p_ncdm = 0.0;
          factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);

          /* momentum integral over the distribution function; idx advances
             by (l_max+1) per momentum bin, so y[idx..idx+2] are psi_0..psi_2 */
          for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {

            q = pba->q_ncdm[n_ncdm][index_q];
            q2 = q*q;
            epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);

            rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
            rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
            rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
            delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];

            //Jump to next momentum bin:
            idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
          }

          rho_delta_ncdm *= factor;
          rho_plus_p_theta_ncdm *= k*factor;
          rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
          delta_p_ncdm *= factor/3.;

          delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
          theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
            (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
          shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
            (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
          delta_p_over_delta_rho_ncdm[n_ncdm] = delta_p_ncdm/rho_delta_ncdm;
        }
      }
    }

    if (pba->has_dcdm == _TRUE_) {
      delta_dcdm = y[ppw->pv->index_pt_delta_dcdm];
      theta_dcdm = y[ppw->pv->index_pt_theta_dcdm];
    }

    if (pba->has_dr == _TRUE_) {
      /* f_dr converts the evolved F_l moments into delta/theta/shear */
      f_dr = pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
      delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr;
      theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr;
      shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr;
    }

    if (pba->has_scf == _TRUE_){
      /* scalar field: density perturbation from field and field-velocity
         perturbations (gauge-dependent psi term only in non-synchronous) */
      if (ppt->gauge == synchronous){
        delta_rho_scf =  1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]);
      }
      else{
        delta_rho_scf =  1./3.*
          (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf]
           + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf]
           - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]);
      }

      rho_plus_p_theta_scf =  1./3.*
        k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];

      delta_scf = delta_rho_scf/pvecback[pba->index_bg_rho_scf];
      theta_scf = rho_plus_p_theta_scf/(pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]);
    }

    /* converting synchronous variables to newtonian ones */
    if (ppt->gauge == synchronous) {

      /* density and velocity perturbations (comment out if you wish to keep synchronous variables) */
      /* standard gauge transformation: delta -> delta - 3(1+w) aH alpha,
         theta -> theta + k^2 alpha (with species-specific extra terms) */
      delta_g -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
      theta_g += k*k*alpha;

      delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
      theta_b += k*k*alpha;

      if (pba->has_ur == _TRUE_) {
        delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
        theta_ur += k*k*alpha;
      }

      if (pba->has_idr == _TRUE_) {
        delta_idr -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
        theta_idr += k*k*alpha;
      }

      if (pba->has_dr == _TRUE_) {
        /* decay radiation also picks up a source term from dcdm decay */
        delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha;
        theta_dr += k*k*alpha;
      }

      if (pba->has_cdm == _TRUE_) {
        delta_cdm -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
        theta_cdm += k*k*alpha;
      }

      /* GDM_CLASS: added gdm*/
      if (pba->has_gdm == _TRUE_) {
        delta_gdm -= 3*(1.+w_gdm)*pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
        theta_gdm += k*k*alpha;
      }

      if (pba->has_idm_dr == _TRUE_) {
        delta_idm_dr -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha;
        theta_idm_dr += k*k*alpha;
      }

      if (pba->has_ncdm == _TRUE_) {
        for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
          /** - --> TODO: gauge transformation of delta, deltaP/rho (?) and theta using -= 3aH(1+w_ncdm) alpha for delta. */
        }
      }

      if (pba->has_dcdm == _TRUE_) {
        delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H);
        theta_dcdm += k*k*alpha;
      }

      if (pba->has_scf == _TRUE_) {
        delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf]));
        theta_scf += k*k*alpha;
      }

    }

    //    fprintf(ppw->perturb_output_file," ");
    /** - --> Handle (re-)allocation */
    if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){
      class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout],
                  sizeof(double)*ppt->number_of_scalar_titles,
                  error_message);
      ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0;
    }
    else{
      /* NOTE(review): bare realloc overwrites the pointer; on failure the
         old block leaks and NULL is stored. Consider a temp pointer (or a
         class_realloc-style macro) — TODO confirm project convention. */
      ppt->scalar_perturbations_data[ppw->index_ikout] =
        realloc(ppt->scalar_perturbations_data[ppw->index_ikout],
                sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles));
    }
    storeidx = 0;
    /* dataptr points at the next free row; class_store_double appends one
       value per call and advances storeidx */
    dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+
      ppt->size_scalar_perturbation_data[ppw->index_ikout];
    ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles;

    class_store_double(dataptr, tau, _TRUE_, storeidx);
    class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
    class_store_double(dataptr, delta_g, _TRUE_, storeidx);
    class_store_double(dataptr, theta_g, _TRUE_, storeidx);
    class_store_double(dataptr, shear_g, _TRUE_, storeidx);
    class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
    class_store_double(dataptr, pol1_g, _TRUE_, storeidx);
    class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
    class_store_double(dataptr, delta_b, _TRUE_, storeidx);
    class_store_double(dataptr, theta_b, _TRUE_, storeidx);
    class_store_double(dataptr, psi, _TRUE_, storeidx);
    class_store_double(dataptr, phi, _TRUE_, storeidx);
    /* perturbed recombination */
    class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx);
    class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx);
    /* Ultra relativistic species */
    class_store_double(dataptr, delta_ur, pba->has_ur, storeidx);
    class_store_double(dataptr, theta_ur, pba->has_ur, storeidx);
    class_store_double(dataptr, shear_ur, pba->has_ur, storeidx);
    /* Interacting dark radiation */
    class_store_double(dataptr, delta_idr, pba->has_idr, storeidx);
    class_store_double(dataptr, theta_idr, pba->has_idr, storeidx);
    if ((pba->has_idr==_TRUE_) && (ppt->idr_nature == idr_free_streaming))
      class_store_double(dataptr, shear_idr, _TRUE_, storeidx);
    /* Interacting dark matter */
    class_store_double(dataptr, delta_idm_dr, pba->has_idm_dr, storeidx);
    class_store_double(dataptr, theta_idm_dr, pba->has_idm_dr, storeidx);
    /* Cold dark matter */
    class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx);
    class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx);
    /* GDM_CLASS */
    /* New fluid variables */
    class_store_double(dataptr, delta_gdm, pba->has_gdm, storeidx);
    class_store_double(dataptr, theta_gdm, pba->has_gdm, storeidx);
    class_store_double(dataptr, shear_gdm, pba->has_gdm, storeidx);
    class_store_double(dataptr, pinad_gdm, pba->has_gdm, storeidx);
    /* New sources */
    class_store_double(dataptr, temperC, _TRUE_, storeidx);
    class_store_double(dataptr, ISW1C, _TRUE_, storeidx);
    class_store_double(dataptr, ISW2C, _TRUE_, storeidx);
    class_store_double(dataptr, dopplC, _TRUE_, storeidx);
    class_store_double(dataptr, doppldotC, _TRUE_, storeidx);
    /* END GDM_CLASS */
    /* Non-cold Dark Matter */
    if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) {
      for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
        class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx);
        class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx);
        class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx);
        class_store_double(dataptr, delta_p_over_delta_rho_ncdm[n_ncdm], _TRUE_, storeidx);
      }
    }
    /* Decaying cold dark matter */
    class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx);
    class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx);
    /* Decay radiation */
    class_store_double(dataptr, delta_dr, pba->has_dr, storeidx);
    class_store_double(dataptr, theta_dr, pba->has_dr, storeidx);
    class_store_double(dataptr, shear_dr, pba->has_dr, storeidx);
    /* Scalar field scf*/
    class_store_double(dataptr, delta_scf, pba->has_scf, storeidx);
    class_store_double(dataptr, theta_scf, pba->has_scf, storeidx);
    /** Fluid */
    class_store_double(dataptr, ppw->delta_rho_fld, pba->has_fld, storeidx);
    class_store_double(dataptr, ppw->rho_plus_p_theta_fld, pba->has_fld, storeidx);
    class_store_double(dataptr, ppw->delta_p_fld, pba->has_fld, storeidx);

    //fprintf(ppw->perturb_output_file,"\n");

  }
  /** - for tensor modes: */

  if (_tensors_) {

    /* photon multipoles: evolved values when rsa and tca are both off,
       tight-coupling estimates when tca is on, zero when rsa is on */
    if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) {
      if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
        delta_g = y[ppw->pv->index_pt_delta_g];
        shear_g = y[ppw->pv->index_pt_shear_g];
        l4_g = y[ppw->pv->index_pt_delta_g+4];
        pol0_g = y[ppw->pv->index_pt_pol0_g];
        pol2_g = y[ppw->pv->index_pt_pol2_g];
        pol4_g = y[ppw->pv->index_pt_pol0_g+4];
      }
      else {
        delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/pvecthermo[pth->index_th_dkappa]; //TBC
        shear_g = 0.;
        l4_g = 0.;
        pol0_g = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/pvecthermo[pth->index_th_dkappa]; //TBC
        pol2_g = 0.;
        pol4_g = 0.;
      }
    }
    else {
      delta_g = 0.;
      shear_g = 0.;
      l4_g = 0.;
      pol0_g = 0.;
      pol2_g = 0.;
      pol4_g = 0.;
    }

    if (ppt->evolve_tensor_ur == _TRUE_){
      delta_ur = y[ppw->pv->index_pt_delta_ur];
      shear_ur = y[ppw->pv->index_pt_shear_ur];
      l4_ur = y[ppw->pv->index_pt_delta_ur+4];
    }

    /** - --> Handle (re-)allocation */
    if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){
      class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout],
                  sizeof(double)*ppt->number_of_tensor_titles,
                  error_message);
      ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0;
    }
    else{
      /* NOTE(review): same bare-realloc leak-on-failure pattern as in the
         scalar branch above — TODO confirm project convention. */
      ppt->tensor_perturbations_data[ppw->index_ikout] =
        realloc(ppt->tensor_perturbations_data[ppw->index_ikout],
                sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles));
    }
    storeidx = 0;
    dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+
      ppt->size_tensor_perturbation_data[ppw->index_ikout];
    ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles;

    //fprintf(ppw->perturb_output_file," ");

    class_store_double(dataptr, tau, _TRUE_, storeidx);
    class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx);
    class_store_double(dataptr, delta_g, _TRUE_, storeidx);
    class_store_double(dataptr, shear_g, _TRUE_, storeidx);
    class_store_double(dataptr, l4_g, _TRUE_, storeidx);
    class_store_double(dataptr, pol0_g, _TRUE_, storeidx);
    class_store_double(dataptr, pol2_g, _TRUE_, storeidx);
    class_store_double(dataptr, pol4_g, _TRUE_, storeidx);
    class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx);
    class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx);

    class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx);
    class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx);
    class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx);
    //printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur);

    /* Non-cold Dark Matter */
    if (ppt->evolve_tensor_ncdm == _TRUE_) {

      idx = ppw->pv->index_pt_psi0_ncdm1;

      /* same momentum-integration scheme as in the scalar branch above */
      for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){
        rho_delta_ncdm = 0.0;
        rho_plus_p_theta_ncdm = 0.0;
        rho_plus_p_shear_ncdm = 0.0;
        delta_p_ncdm = 0.0;
        factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4);

        for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) {

          q = pba->q_ncdm[n_ncdm][index_q];
          q2 = q*q;
          epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2);

          rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];
          rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1];
          rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2];
          delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx];

          //Jump to next momentum bin:
          idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1);
        }

        rho_delta_ncdm *= factor;
        rho_plus_p_theta_ncdm *= k*factor;
        rho_plus_p_shear_ncdm *= 2.0/3.0*factor;
        delta_p_ncdm *= factor/3.;

        delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm];
        theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/
          (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);
        shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/
          (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]);

        class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx);
        class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx);
        class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx);
      }
    }

    //    fprintf(ppw->perturb_output_file,"\n");

  }

  /* release the ncdm scratch arrays allocated at the top */
  if (pba->has_ncdm == _TRUE_){
    free(delta_ncdm);
    free(theta_ncdm);
    free(shear_ncdm);
    free(delta_p_over_delta_rho_ncdm);
  }

  return _SUCCESS_;

}
/**
* Compute derivative of all perturbations to be integrated
*
* For each mode (scalar/vector/tensor) and each wavenumber k, this
* function computes the derivative of all values in the vector of
* perturbed variables to be integrated.
*
* This is one of the few functions in the code which is passed to the generic_integrator() routine.
* Since generic_integrator() should work with functions passed from various modules, the format of the arguments
* is a bit special:
* - fixed parameters and workspaces are passed through a generic pointer.
* generic_integrator() doesn't know what the content of this pointer is.
* - errors are not written as usual in pth->error_message, but in a generic
* error_message passed in the list of arguments.
*
* @param tau Input: conformal time
* @param y Input: vector of perturbations
* @param dy Output: vector of its derivatives (already allocated)
* @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau.
* @param error_message Output: error message
*/
int perturb_derivs(double tau,
double * y,
double * dy,
void * parameters_and_workspace,
ErrorMsg error_message
) {
/** Summary: */
/** - define local variables */
/* multipole */
int l;
/* scale factor and other background quantities */
double a,a2,a_prime_over_a,R;
/* short-cut names for the fields of the input structure */
struct perturb_parameters_and_workspace * pppaw;
double k,k2;
int index_md;
struct precision * ppr;
struct background * pba;
struct thermo * pth;
struct perturbs * ppt;
struct perturb_workspace * ppw;
double * pvecback;
double * pvecthermo;
double * pvecmetric;
double * s_l;
struct perturb_vector * pv;
/* short-cut notations for the perturbations */
double delta_g=0.,theta_g=0.,shear_g=0.;
double delta_b,theta_b;
double delta_idr=0., theta_idr=0.;
double cb2,cs2,ca2,delta_p_b_over_rho_b;
double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.;
/* perturbed recombination (just to simplify the notation) */
double H0=0.,Nnow=0.,n_H=0.,fHe=0.;
double delta_temp=0.,delta_chi=0., chi=0.;
double alpha_rec=0.,delta_alpha_rec=0.;
double a_rad=0., Compton_CR =0.;
double Tb_in_K=0.;
/* Non-metric source terms for photons, i.e. \mathcal{P}^{(m)} from arXiv:1305.3261 */
double P0,P1,P2;
/* for use with fluid (fld): */
double w_fld,dw_over_da_fld,w_prime_fld,integral_fld;
/* for use with non-cold dark matter (ncdm): */
int index_q,n_ncdm,idx;
double q,epsilon,dlnf0_dlnq,qk_div_epsilon;
double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.;
/* for use with curvature */
double cotKgen, sqrt_absK;
double s2_squared, ssqrt3;
/* for use with dcdm and dr */
double f_dr, fprime_dr;
double Sinv=0., dmu_idm_dr=0., dmu_idr=0., tca_slip_idm_dr=0.;
/* GDM_CLASS */
double shear_gdm=0.,pinad_gdm=0.;
double w_gdm,ca2_gdm,cs2_gdm,cv2_gdm;
/* END GDM_CLASS */
/** - rename the fields of the input structure (just to avoid heavy notations) */
pppaw = parameters_and_workspace;
k = pppaw->k;
k2=k*k;
index_md = pppaw->index_md;
ppr = pppaw->ppr;
pba = pppaw->pba;
pth = pppaw->pth;
ppt = pppaw->ppt;
ppw = pppaw->ppw;
s_l = ppw->s_l;
pvecback = ppw->pvecback;
pvecthermo = ppw->pvecthermo;
pvecmetric = ppw->pvecmetric;
pv = ppw->pv;
/** - get background/thermo quantities in this point */
class_call(background_at_tau(pba,
tau,
pba->normal_info,
pba->inter_closeby,
&(ppw->last_index_back),
pvecback),
pba->error_message,
error_message);
class_call(thermodynamics_at_z(pba,
pth,
1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */
pth->inter_closeby,
&(ppw->last_index_thermo),
pvecback,
pvecthermo),
pth->error_message,
error_message);
/** - get metric perturbations with perturb_einstein() */
class_call(perturb_einstein(ppr,
pba,
pth,
ppt,
index_md,
k,
tau,
y,
ppw),
ppt->error_message,
error_message);
/** - compute related background quantities */
a = pvecback[pba->index_bg_a];
a2 = a*a;
a_prime_over_a = pvecback[pba->index_bg_H] * a;
R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
if((pba->has_idm_dr==_TRUE_)){
Sinv = 4./3. * pvecback[pba->index_bg_rho_idr]/ pvecback[pba->index_bg_rho_idm_dr];
dmu_idm_dr = pvecthermo[pth->index_th_dmu_idm_dr];
dmu_idr = pth->b_idr/pth->a_idm_dr*pba->Omega0_idr/pba->Omega0_idm_dr*dmu_idm_dr;
}
/** - Compute 'generalised cotK function of argument \f$ \sqrt{|K|}*\tau \f$, for closing hierarchy.
(see equation 2.34 in arXiv:1305.3261): */
if (pba->has_curvature == _FALSE_){
cotKgen = 1.0/(k*tau);
}
else{
sqrt_absK = sqrt(fabs(pba->K));
if (pba->K < 0)
cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau);
else
cotKgen = sqrt_absK/k/tan(sqrt_absK*tau);
}
s2_squared = 1.-3.*pba->K/k2;
/** - for scalar modes: */
if (_scalars_) {
/** - --> (a) define short-cut notations for the scalar perturbations */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
}
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off){
delta_idr = y[pv->index_pt_delta_idr];
theta_idr = y[pv->index_pt_theta_idr];
}
}
delta_b = y[pv->index_pt_delta_b];
theta_b = y[pv->index_pt_theta_b];
cb2 = pvecthermo[pth->index_th_cb2];
delta_p_b_over_rho_b = cb2*delta_b; /* for baryons, (delta p)/rho with Ma & Bertschinger approximation: sound speed = adiabatic sound speed */
/** - --> (b) perturbed recombination **/
if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){
delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp];
delta_p_b_over_rho_b = pvecthermo[pth->index_th_wb]*(delta_b+delta_temp); /* for baryons, (delta p)/rho with sound speed from arXiv:0707.2727 */
delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi];
chi=pvecthermo[pth->index_th_xe];
// Conversion of H0 in inverse seconds (pba->H0 is [H0/c] in inverse Mpcs)
H0 = pba->H0 * _c_ / _Mpc_over_m_;
//Computation of Nnow in SI units
Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_);
// total amount of hydrogen today
n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow;
// Helium-to-hydrogen ratio
fHe = pth->YHe / (_not4_*(1-pth->YHe));
// The constant such that rho_gamma = a_rad * T^4
a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3);
// Compton cooling rate in Mpc^(-1)
Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ;
// Temperature is already in Kelvin
Tb_in_K = pvecthermo[pth->index_th_Tb];
// Alpha in m^3/s, cf. Recfast paper
alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ;
// delta alpha, dimensionless
delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp;
} // end of perturbed recombination related quantities
/** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
- Each continuity equation contains a term in (theta+metric_continuity) with
metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
- Each Euler equation contains a source term metric_euler with
metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
- Each shear derivative equation contains a source term metric_shear equal to
metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
- metric_shear_prime is the derivative of metric_shear
- In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
(-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */
if (ppt->gauge == synchronous) {
metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
metric_euler = 0.;
metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
//metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.;
}
if (ppt->gauge == newtonian) {
metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
metric_euler = k2*pvecmetric[ppw->index_mt_psi];
metric_shear = 0.;
//metric_shear_prime = 0.;
metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime];
}
/** - --> (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) {
delta_g = ppw->rsa_delta_g;
theta_g = ppw->rsa_theta_g;
}
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_on){
delta_idr = ppw->rsa_delta_idr;
theta_idr = ppw->rsa_theta_idr;
}
}
/** - --> (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION */
/** - ---> photon temperature density */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity);
}
/** - ---> baryon density */
dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity);
/** - ---> baryon velocity (depends on tight-coupling approximation=tca) */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/* without tca */
/** - ----> perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
- a_prime_over_a*theta_b
+ metric_euler
+ k2*delta_p_b_over_rho_b
+ R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b);
}
else {
/* with tca */
class_call(perturb_tca_slip_and_shear(y,pppaw,error_message),
error_message,
error_message);
/* perturbed recombination has an impact **/
dy[pv->index_pt_theta_b] =
(-a_prime_over_a*theta_b
+k2*(delta_p_b_over_rho_b+R*(delta_g/4.-s2_squared*ppw->tca_shear_g))
+R*ppw->tca_slip)/(1.+R)
+metric_euler;
}
/** - ---> photon temperature higher momenta and photon polarization (depend on tight-coupling approximation) */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - ----> if photon tight-coupling is off */
if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) {
/** - -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */
P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.;
/** - -----> photon temperature velocity */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g])
+ metric_euler
+ pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);
/** - -----> photon temperature shear */
dy[pv->index_pt_shear_g] =
0.5*(8./15.*(theta_g+metric_shear)
-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g]
-pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0));
/** - -----> photon temperature l=3 */
l = 3;
dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)*
(l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/** - -----> photon temperature l>3 */
for (l = 4; l < pv->l_max_g; l++) {
dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)*
(l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
}
/** - -----> photon temperature lmax */
l = pv->l_max_g; /* l=lmax */
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/** - -----> photon polarization l=0 */
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0);
/** - -----> photon polarization l=1 */
dy[pv->index_pt_pol1_g] =
k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g];
/** - -----> photon polarization l=2 */
dy[pv->index_pt_pol2_g] =
k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1])
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0);
/** - -----> photon polarization l>2 */
for (l=3; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/** - -----> photon polarization lmax_pol */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
/** - ----> if photon tight-coupling is on: */
else {
/** - -----> in that case, only need photon velocity */
/* perturbed recombination has an impact **/
dy[pv->index_pt_theta_g] =
-(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-k2*delta_p_b_over_rho_b)/R
+k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler;
}
}
/** - ---> cdm */
if (pba->has_cdm == _TRUE_) {
/** - ----> newtonian gauge: cdm density and velocity */
if (ppt->gauge == newtonian) {
dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */
dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */
}
/** - ----> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */
}
}
/** - ---> idr */
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off) {
dy[pv->index_pt_delta_idr] = -4./3.*(theta_idr + metric_continuity);
}
}
/** - ---> idm_dr */
if (pba->has_idm_dr == _TRUE_){
dy[pv->index_pt_delta_idm_dr] = -(y[pv->index_pt_theta_idm_dr]+metric_continuity); /* idm_dr density */
if (ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off) {
dy[pv->index_pt_theta_idm_dr] = - a_prime_over_a*y[pv->index_pt_theta_idm_dr] + metric_euler; /* idm_dr velocity */
dy[pv->index_pt_theta_idm_dr] -= (Sinv*dmu_idm_dr*(y[pv->index_pt_theta_idm_dr] - theta_idr) - k2*pvecthermo[pth->index_th_cidm_dr2]*y[pv->index_pt_delta_idm_dr]);
}
else{
tca_slip_idm_dr = (pth->nindex_idm_dr-2./(1.+Sinv))*a_prime_over_a*(y[pv->index_pt_theta_idm_dr]-theta_idr) + 1./(1.+Sinv)/dmu_idm_dr*
(-(pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a) *y[pv->index_pt_theta_idm_dr] - a_prime_over_a *
(.5*k2*delta_idr + metric_euler) + k2*(pvecthermo[pth->index_th_cidm_dr2]*dy[pv->index_pt_delta_idm_dr] - 1./4.*dy[pv->index_pt_delta_idr]));
ppw->tca_shear_idm_dr = 0.5*8./15./dmu_idm_dr/ppt->alpha_idm_dr[0]*(y[pv->index_pt_theta_idm_dr]+metric_shear);
dy[pv->index_pt_theta_idm_dr] = 1./(1.+Sinv)*(- a_prime_over_a*y[pv->index_pt_theta_idm_dr] + k2*pvecthermo[pth->index_th_cidm_dr2]*
y[pv->index_pt_delta_idm_dr] + k2*Sinv*(delta_idr/4. - ppw->tca_shear_idm_dr)) + metric_euler + Sinv/(1.+Sinv)*tca_slip_idm_dr;
}
}
/* perturbed recombination */
/* computes the derivatives of delta x_e and delta T_b */
if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){
/* alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec */
dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ;
/* see the documentation for this formula */
dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR
* pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe))
+ pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) );
}
/** - ---> dcdm and dr */
if (pba->has_dcdm == _TRUE_) {
/** - ----> dcdm */
dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity)
- a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */
dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */
}
/** - ---> dr */
if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) {
/* f = rho_dr*a^4/rho_crit_today. In CLASS density units
rho_crit_today = H0^2.
*/
f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr];
fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2);
/** - ----> dr F0 */
dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+
fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2);
/** - ----> dr F1 */
dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared +
4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm];
/** - ----> exact dr F2 */
dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3];
/** - ----> exact dr l=3 */
l = 3;
dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)*
(l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]);
/** - ----> exact dr l>3 */
for (l = 4; l < pv->l_max_dr; l++) {
dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]);
}
/** - ----> exact dr lmax_dr */
l = pv->l_max_dr;
dy[pv->index_pt_F0_dr+l] =
k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]);
}
/** -> GDM_CLASS: generalized dark matter (gdm) */
if (pba->has_gdm == _TRUE_) {
/** ---> factors w, adiabatic sound speed ca2 (all background-related),
plus actual sound speed in the fluid rest frame cs2 and cv2*/
w_gdm = ppw->pvecback[pba->index_bg_w_gdm];
ca2_gdm = ppw->pvecback[pba->index_bg_ca2_gdm];
cs2_gdm = cs2_gdm_of_a_and_k(pba,a,k,ppw);
cv2_gdm = cv2_gdm_of_a_and_k(pba,a,k);
/** ---> specify whether \Pi_{nad} is k-independent or k^2 dependent*/
pinad_gdm = (cs2_gdm-ca2_gdm)*( y[pv->index_pt_delta_gdm] + 3*a_prime_over_a*(1.+w_gdm)*y[pv->index_pt_theta_gdm]/k2);
if (ppt->k2_Pinad_gdm == _TRUE_) {
pinad_gdm = pinad_gdm*k2;
}
/** ---> fluid density (rewritten in terms of \Pi_{nad}) */
dy[pv->index_pt_delta_gdm] =
-(1.+w_gdm)*(y[pv->index_pt_theta_gdm]+metric_continuity)
+3.*a_prime_over_a*((w_gdm-ca2_gdm)*y[pv->index_pt_delta_gdm] - pinad_gdm) ;
/** ---> fluid velocity (added here the shear_gdm) */
if (ppt->dynamic_shear_gdm == _TRUE_) {
shear_gdm = y[pv->index_pt_shear_gdm];
}
else {
shear_gdm = ppw->pvecmetric[ppw->index_mt_shear_gdm];
}
dy[pv->index_pt_theta_gdm] = // fluid velocity (rewritten in terms of \Pi_{nad})
-(1.-3.*ca2_gdm)*a_prime_over_a*y[pv->index_pt_theta_gdm]
+k2/(1.+w_gdm)*(ca2_gdm*y[pv->index_pt_delta_gdm]+pinad_gdm)
+metric_euler - s2_squared*k2*shear_gdm;
/** ---> fluid shear */
if (ppt->dynamic_shear_gdm == _TRUE_) {
dy[pv->index_pt_shear_gdm] = /* fluid shear */
-3.*a_prime_over_a*y[pv->index_pt_shear_gdm]
+8./3.*cv2_gdm/(1.+w_gdm)*(y[pv->index_pt_theta_gdm] + metric_shear);
}
}
/* END GDM_CLASS */
/** - ---> fluid (fld) */
if (pba->has_fld == _TRUE_) {
if (pba->use_ppf == _FALSE_){
/** - ----> factors w, w_prime, adiabatic sound speed ca2 (all three background-related),
plus actual sound speed in the fluid rest frame cs2 */
class_call(background_w_fld(pba,a,&w_fld,&dw_over_da_fld,&integral_fld), pba->error_message, ppt->error_message);
w_prime_fld = dw_over_da_fld * a_prime_over_a * a;
ca2 = w_fld - w_prime_fld / 3. / (1.+w_fld) / a_prime_over_a;
cs2 = pba->cs2_fld;
/** - ----> fluid density */
dy[pv->index_pt_delta_fld] =
-(1+w_fld)*(y[pv->index_pt_theta_fld]+metric_continuity)
-3.*(cs2-w_fld)*a_prime_over_a*y[pv->index_pt_delta_fld]
-9.*(1+w_fld)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2;
/** - ----> fluid velocity */
dy[pv->index_pt_theta_fld] = /* fluid velocity */
-(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld]
+cs2*k2/(1.+w_fld)*y[pv->index_pt_delta_fld]
+metric_euler;
}
else {
dy[pv->index_pt_Gamma_fld] = ppw->Gamma_prime_fld; /* Gamma variable of PPF formalism */
}
}
/** - ---> scalar field (scf) */
if (pba->has_scf == _TRUE_) {
/** - ----> field value */
dy[pv->index_pt_phi_scf] = y[pv->index_pt_phi_prime_scf];
/** - ----> Klein Gordon equation */
dy[pv->index_pt_phi_prime_scf] = - 2.*a_prime_over_a*y[pv->index_pt_phi_prime_scf]
- metric_continuity*pvecback[pba->index_bg_phi_prime_scf] // metric_continuity = h'/2
- (k2 + a2*pvecback[pba->index_bg_ddV_scf])*y[pv->index_pt_phi_scf]; //checked
}
/** - ---> interacting dark radiation */
if (pba->has_idr == _TRUE_){
if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_off) {
if ((pba->has_idm_dr == _FALSE_)||((pba->has_idm_dr == _TRUE_)&&(ppw->approx[ppw->index_ap_tca_idm_dr] == (int)tca_idm_dr_off))) {
/** - ----> idr velocity */
if(ppt->idr_nature == idr_free_streaming)
dy[pv->index_pt_theta_idr] = k2*(y[pv->index_pt_delta_idr]/4.-s2_squared*y[pv->index_pt_shear_idr]) + metric_euler;
else
dy[pv->index_pt_theta_idr] = k2/4. * y[pv->index_pt_delta_idr] + metric_euler;
if (pba->has_idm_dr == _TRUE_)
dy[pv->index_pt_theta_idr] += dmu_idm_dr*(y[pv->index_pt_theta_idm_dr]-y[pv->index_pt_theta_idr]);
if(ppt->idr_nature == idr_free_streaming){
/** - ----> exact idr shear */
l = 2;
dy[pv->index_pt_shear_idr] = 0.5*(8./15.*(y[pv->index_pt_theta_idr]+metric_shear)-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_idr+1]);
if (pba->has_idm_dr == _TRUE_)
dy[pv->index_pt_shear_idr]-= (ppt->alpha_idm_dr[l-2]*dmu_idm_dr + ppt->beta_idr[l-2]*dmu_idr)*y[pv->index_pt_shear_idr];
/** - ----> exact idr l=3 */
l = 3;
dy[pv->index_pt_l3_idr] = k/(2.*l+1.)*(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_idr]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_idr+1]);
if (pba->has_idm_dr == _TRUE_)
dy[pv->index_pt_l3_idr]-= (ppt->alpha_idm_dr[l-2]*dmu_idm_dr + ppt->beta_idr[l-2]*dmu_idr)*y[pv->index_pt_l3_idr];
/** - ----> exact idr l>3 */
for (l = 4; l < pv->l_max_idr; l++) {
dy[pv->index_pt_delta_idr+l] = k/(2.*l+1)*(l*s_l[l]*y[pv->index_pt_delta_idr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_idr+l+1]);
if (pba->has_idm_dr == _TRUE_)
dy[pv->index_pt_delta_idr+l]-= (ppt->alpha_idm_dr[l-2]*dmu_idm_dr + ppt->beta_idr[l-2]*dmu_idr)*y[pv->index_pt_delta_idr+l];
}
/** - ----> exact idr lmax_idr */
l = pv->l_max_idr;
dy[pv->index_pt_delta_idr+l] = k*(s_l[l]*y[pv->index_pt_delta_idr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_idr+l]);
if (pba->has_idm_dr == _TRUE_)
dy[pv->index_pt_delta_idr+l]-= (ppt->alpha_idm_dr[l-2]*dmu_idm_dr + ppt->beta_idr[l-2]*dmu_idr)*y[pv->index_pt_delta_idr+l];
}
}
else{
dy[pv->index_pt_theta_idr] = 1./(1.+Sinv)*(- a_prime_over_a*y[pv->index_pt_theta_idm_dr] + k2*pvecthermo[pth->index_th_cidm_dr2]*y[pv->index_pt_delta_idm_dr]
+ k2*Sinv*(1./4.*y[pv->index_pt_delta_idr] - ppw->tca_shear_idm_dr)) + metric_euler - 1./(1.+Sinv)*tca_slip_idm_dr;
}
}
}
/** - ---> ultra-relativistic neutrino/relics (ur) */
if (pba->has_ur == _TRUE_) {
/** - ----> if radiation streaming approximation is off */
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
/** - -----> ur density */
dy[pv->index_pt_delta_ur] =
// standard term
-4./3.*(y[pv->index_pt_theta_ur] + metric_continuity)
// non-standard term, non-zero if ceff2_ur not 1/3
+(1.-ppt->three_ceff2_ur)*a_prime_over_a*(y[pv->index_pt_delta_ur] + 4.*a_prime_over_a*y[pv->index_pt_theta_ur]/k/k);
/** - -----> ur velocity */
dy[pv->index_pt_theta_ur] =
// standard term with extra coefficient (3 ceff2_ur), normally equal to one
k2*(ppt->three_ceff2_ur*y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler
// non-standard term, non-zero if ceff2_ur not 1/3
-(1.-ppt->three_ceff2_ur)*a_prime_over_a*y[pv->index_pt_theta_ur];
if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) {
/** - -----> exact ur shear */
dy[pv->index_pt_shear_ur] =
0.5*(
// standard term
8./15.*(y[pv->index_pt_theta_ur]+metric_shear)-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]
// non-standard term, non-zero if cvis2_ur not 1/3
-(1.-ppt->three_cvis2_ur)*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear)));
/** - -----> exact ur l=3 */
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
/** - -----> exact ur l>3 */
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
/** - -----> exact ur lmax_ur */
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
else {
/** - -----> in fluid approximation (ufa): only ur shear needed */
//TBC: curvature?
/* a la Ma & Bertschinger */
if (ppr->ur_fluid_approximation == ufa_mb) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la Hu */
if (ppr->ur_fluid_approximation == ufa_hu) {
dy[pv->index_pt_shear_ur] =
-3.*a_prime_over_a*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_shear);
}
/* a la CLASS */
if (ppr->ur_fluid_approximation == ufa_CLASS) {
dy[pv->index_pt_shear_ur] =
-3./tau*y[pv->index_pt_shear_ur]
+2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class);
}
}
}
}
/** - ---> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (pba->has_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** - ----> first case: use a fluid approximation (ncdmfa) */
//TBC: curvature
if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) {
/** - -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - -----> define intermediate quantities */
rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */
p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */
pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */
w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* equation of state parameter */
ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */
/* c_eff is (delta p / delta rho) in the gauge under
consideration (not in the gauge comoving with the
fluid) */
/* c_vis is introduced in order to close the system */
/* different ansatz for sound speed c_eff and viscosity speed c_vis */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = w_ncdm;
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
ceff2_ncdm = ca2_ncdm;
cvis2_ncdm = 3.*w_ncdm*ca2_ncdm;
}
/** - -----> exact continuity equation */
dy[idx] = -(1.0+w_ncdm)*(y[idx+1]+metric_continuity)-
3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx];
/** - -----> exact euler equation */
dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+
ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2]
+ metric_euler;
/** - -----> different ansatz for approximate shear derivative */
if (ppr->ncdm_fluid_approximation == ncdmfa_mb) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_hu) {
dy[idx+2] = -3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear);
}
if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) {
dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2]
+8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class);
}
/** - -----> jump to next species */
idx += pv->l_max_ncdm[n_ncdm]+1;
}
}
/** - ----> second case: use exact equation (Boltzmann hierarchy on momentum grid) */
else {
/** - -----> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - -----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** - -----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** - -----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.;
/** - -----> ncdm velocity for given momentum bin */
dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2])
-epsilon*metric_euler/(3*q*k)*dlnf0_dlnq;
/** - -----> ncdm shear for given momentum bin */
dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3])
-s_l[2]*metric_shear*2./15.*dlnf0_dlnq;
/** - -----> ncdm l>3 for given momentum bin */
for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** - -----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** - -----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
}
/** - ---> metric */
/** - ---> eta of synchronous gauge */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime];
}
if (ppt->gauge == newtonian) {
dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime];
}
}
/** - vector mode */
if (_vectors_) {
fprintf(stderr,"we are in vectors\n");
ssqrt3 = sqrt(1.-2.*pba->K/k2);
cb2 = pvecthermo[pth->index_th_cb2];
/** - --> baryon velocity */
if (ppt->gauge == synchronous) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]);
}
else if (ppt->gauge == newtonian) {
dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b]
- _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
+ pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V];
}
/*
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
*/
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(1)}) (see Eq. B.23 in 1305.3261)*/
P1 = -_SQRT6_/40.*(
4./(3.*k)*theta_g //F1
+y[pv->index_pt_delta_g+3]
+2.*y[pv->index_pt_pol0_g]
+10./7.*y[pv->index_pt_pol2_g]
-4./7.*y[pv->index_pt_pol0_g+4]);
if (ppt->gauge == synchronous) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]);
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1)
+4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime];
}
else if (ppt->gauge == newtonian) {
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b])
-2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime];
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1);
}
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/*
}
}
*/
if (ppt->gauge == synchronous) {
/* Vector metric perturbation in synchronous gauge: */
dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime];
}
else if (ppt->gauge == newtonian){
/* Vector metric perturbation in Newtonian gauge: */
dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime];
}
}
/** - tensor modes: */
if (_tensors_) {
if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) {
/* short-cut notations for the tensor perturbations */
delta_g = y[pv->index_pt_delta_g];
theta_g = y[pv->index_pt_theta_g];
shear_g = y[pv->index_pt_shear_g];
/* (P^{(2)}) */
P2 =-1.0/_SQRT6_*(
1./10.*delta_g
+2./7.*shear_g
+3./70.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./7.*y[pv->index_pt_pol2_g]
-3./70.*y[pv->index_pt_pol0_g+4]);
/* above expression from paper, expression below matches old class but is not correct
P2 = -1.0/_SQRT6_*(
1./10.*delta_g
+2./35.*shear_g
+1./210.*y[pv->index_pt_delta_g+4]
-3./5.*y[pv->index_pt_pol0_g]
+6./35.*y[pv->index_pt_pol2_g]
-1./210.*y[pv->index_pt_pol0_g+4]
);
*/
/* photon density (delta_g = F_0) */
dy[pv->index_pt_delta_g] =
-4./3.*theta_g
-pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2)
//+y[pv->index_pt_gwdot];
+_SQRT6_*y[pv->index_pt_gwdot]; //TBC
/* photon velocity (theta_g = (3k/4)*F_1) */
dy[pv->index_pt_theta_g] =
k2*(delta_g/4.-s_l[2]*shear_g)
-pvecthermo[pth->index_th_dkappa]*theta_g;
/* photon shear (shear_g = F_2/2) */
dy[pv->index_pt_shear_g] =
4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1]
-pvecthermo[pth->index_th_dkappa]*shear_g;
/* photon l=3 */
dy[pv->index_pt_l3_g] =
k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g];
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=4; l < pv->l_max_g; l++)
dy[pv->index_pt_delta_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* l=lmax */
l = pv->l_max_g;
dy[pv->index_pt_delta_g+l] =
k*(s_l[l]*y[pv->index_pt_delta_g+l-1]
-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l])
- pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l];
/* photon polarization, l=0 (pol0_g = G_0)*/
dy[pv->index_pt_pol0_g] =
-k*y[pv->index_pt_pol0_g+1]
-pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2);
/* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */
for (l=1; l < pv->l_max_pol_g; l++)
dy[pv->index_pt_pol0_g+l] =
k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
/* l=lmax */
l = pv->l_max_pol_g;
dy[pv->index_pt_pol0_g+l] =
k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]
-(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l])
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l];
}
}
if (ppt->evolve_tensor_ur == _TRUE_) {
dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot];
dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]);
dy[pv->index_pt_shear_ur] = (4./15.*y[pv->index_pt_theta_ur]
-3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]);
l = 3;
dy[pv->index_pt_l3_ur] = k/(2.*l+1.)*
(l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]);
for (l = 4; l < pv->l_max_ur; l++) {
dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)*
(l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]);
}
l = pv->l_max_ur;
dy[pv->index_pt_delta_ur+l] =
k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]);
}
/** - --> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */
//TBC: curvature in all ncdm
if (ppt->evolve_tensor_ncdm == _TRUE_) {
idx = pv->index_pt_psi0_ncdm1;
/** - ---> loop over species */
for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) {
/** - ----> loop over momentum */
for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) {
/** - ----> define intermediate quantities */
dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q];
q = pba->q_ncdm[n_ncdm][index_q];
epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]);
qk_div_epsilon = k*q/epsilon;
/** - ----> ncdm density for given momentum bin */
dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq;
/** - ----> ncdm l>0 for given momentum bin */
for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){
dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]);
}
/** - ----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger)
but with curvature taken into account a la arXiv:1305.3261 */
dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l];
/** - ----> jump to next momentum bin or species */
idx += (pv->l_max_ncdm[n_ncdm]+1);
}
}
}
/** - --> tensor metric perturbation h (gravitational waves) */
dy[pv->index_pt_gw] = y[pv->index_pt_gwdot];
/** - --> its time-derivative */
dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime];
}
return _SUCCESS_;
}
/**
* Compute the baryon-photon slip (theta_g - theta_b)' and the photon
* shear in the tight-coupling approximation
*
* @param y Input: vector of perturbations
* @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, slip and shear
* @param error_message Output: error message
*/
int perturb_tca_slip_and_shear(double * y,
                               void * parameters_and_workspace,
                               ErrorMsg error_message
                               ) {
  /** Summary: computes the baryon-photon slip (theta_g - theta_b)' and the
      photon shear in the tight-coupling approximation (TCA), at the order
      requested by ppr->tight_coupling_approximation, and stores the results
      in ppw->tca_slip and ppw->tca_shear_g. */

  /** - define local variables */

  /* scale factor and other background quantities */
  double a,a_prime_over_a,a_primeprime_over_a,R;

  /* useful terms for tight-coupling approximation */
  double slip=0.;
  double tau_c=0.,dtau_c=0.;
  double theta_prime,shear_g_prime=0.,theta_prime_prime;
  double g0,g0_prime,g0_prime_prime;
  double F=0.,F_prime=0.,F_prime_prime=0.;

  /* short-cut names for the fields of the input structure */
  struct perturb_parameters_and_workspace * pppaw;
  double k,k2;
  struct precision * ppr;
  struct background * pba;
  struct thermo * pth;
  struct perturbs * ppt;
  struct perturb_workspace * ppw;
  double * pvecback;
  double * pvecthermo;
  double * pvecmetric;
  struct perturb_vector * pv;

  /* short-cut notations for the perturbations */
  double delta_g=0.,theta_g=0.,shear_g=0.;
  double delta_b,theta_b;
  double Delta;
  double cb2;
  double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.;

  /* for use with curvature */
  double s2_squared;

  /** - rename the fields of the input structure (just to avoid heavy notations) */
  pppaw = parameters_and_workspace;
  k = pppaw->k;
  k2=k*k;
  ppr = pppaw->ppr;
  pba = pppaw->pba;
  pth = pppaw->pth;
  ppt = pppaw->ppt;
  ppw = pppaw->ppw;
  pvecback = ppw->pvecback;
  pvecthermo = ppw->pvecthermo;
  pvecmetric = ppw->pvecmetric;
  pv = ppw->pv;

  /** - compute related background quantities */
  a = pvecback[pba->index_bg_a];
  a_prime_over_a = pvecback[pba->index_bg_H] * a;
  /* a''/a expressed from H' and (a'/a)^2; derivatives are with respect to conformal time */
  a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a;
  /* R = (4/3) rho_gamma / rho_b, the photon-to-baryon momentum-density ratio */
  R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b];
  /* curvature correction factor (equals 1 in a flat universe, K=0) */
  s2_squared = 1.-3.*pba->K/k2;

  /** - --> (a) define short-cut notations for the scalar perturbations */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) {
    delta_g = y[pv->index_pt_delta_g];
    theta_g = y[pv->index_pt_theta_g];
  }
  delta_b = y[pv->index_pt_delta_b];
  theta_b = y[pv->index_pt_theta_b];
  cb2 = pvecthermo[pth->index_th_cb2];

  /* during TCA one can show that sound speed = adiabatic sound speed,
     so no need to take into account corrections from perturbed
     recombination here */

  /** - --> (b) define short-cut notations used only in tight-coupling approximation */
  tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */
  dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */
  F = tau_c/(1+R); /* F = tau_c/(1+R) */
  if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) {
    F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */
    if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {
      F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */
                      + 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R)
        +2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R)
        +tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R);
    }
  }

  /** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below)
      - Each continuity equation contains a term in (theta+metric_continuity) with
      metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge
      - Each Euler equation contains a source term metric_euler with
      metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge
      - Each shear derivative equation contains a source term metric_shear equal to
      metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge
      - metric_shear_prime is the derivative of metric_shear
      - In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge,
      (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */
  if (ppt->gauge == synchronous) {
    metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.;
    metric_euler = 0.;
    metric_shear = k2 * pvecmetric[ppw->index_mt_alpha];
    metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime];
  }
  if (ppt->gauge == newtonian) {
    metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime];
    metric_euler = k2*pvecmetric[ppw->index_mt_psi];
    metric_shear = 0.;
    metric_shear_prime = 0.;
  }

  /** - --> (d) if some approximation schemes are turned on, enforce a few y[ ] values computed in perturb_einstein */

  /* free-streaming photon velocity */
  if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)
    theta_g = ppw->rsa_theta_g;

  /** - ---> like Ma & Bertschinger */
  if (ppr->tight_coupling_approximation == (int)first_order_MB) {
    slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> relax assumption dkappa~a\f$^{-2}\f$ (like in CAMB) */
  if ((ppr->tight_coupling_approximation == (int)first_order_CAMB) || (ppr->tight_coupling_approximation == (int)compromise_CLASS)) {
    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> also relax assumption cb2~a\f$^{-1}\f$ */
  if ((ppr->tight_coupling_approximation == (int)first_order_CLASS) || (ppr->tight_coupling_approximation == (int)second_order_CLASS)){
    slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g)
      +F*(-a_primeprime_over_a*theta_b
          +k2*(-a_prime_over_a*delta_g/2.
               +pvecthermo[pth->index_th_dcb2]*delta_b
               +cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.)
          -a_prime_over_a*metric_euler);
  }

  /** - ---> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */
  shear_g=16./45.*tau_c*(theta_g+metric_shear);
  /* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3)
     because they didn't include the contribution of G_gamma0
     and G_gamma2, which are of the same order as sigma_g. This
     was already consistently included in CAMB) */

  /** - ---> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */
  theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*delta_b+R/4.*delta_g))/(1.+R) + metric_euler;

  /** - ---> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */
  shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear));

  /** - ---> 2nd order as in CRS*/
  if (ppr->tight_coupling_approximation == (int)second_order_CRS) {

    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      class_test(pba->sgnK != 0,
                 ppt->error_message,
                 "the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme");

      /* infer Delta from h'' using Einstein equation */
      Delta = 2*k2*y[pv->index_pt_eta]
        -2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime]
        -pvecmetric[ppw->index_mt_h_prime_prime];

      /* monster expression for slip at second-order in tight-coupling */
      slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g)
        +(-a_primeprime_over_a*theta_b
          -k2*a_prime_over_a*(delta_g/2.-2.*shear_g)
          +k2*(cb2*(-theta_b-metric_continuity)
               -4./3.*(-theta_g-metric_continuity)/4.
               +shear_g_prime)
          )/pvecthermo[pth->index_th_dkappa]/(1.+R)
        -2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a)
        /(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa]
        +(
          a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R)
          +a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R)
          +a_primeprime_over_a*k2*cb2*delta_b/(1.+R)
          +k2*k2*(3.*cb2-1.)*cb2*delta_b/3./(1.+R)
          +k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R)
          +a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R)
          +a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R)
          +a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R)
          +a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R)
          +a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3.
          +k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3.
          +2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime]
          +k2*(1.-3.*cb2)*Delta/6.
          )/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R)
        -(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]
        +4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]);
    }
  }

  /** - ---> 2nd order like in CLASS paper */
  if (ppr->tight_coupling_approximation == (int)second_order_CLASS) {

    if (ppt->gauge == newtonian) {
      class_stop(error_message,
                 "the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme");
    }

    if (ppt->gauge == synchronous) {

      /* zero order for theta_b'' = theta_g'' */
      theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b
                           +k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R);

      /* zero-order quantities g0, g0', g0'' */
      g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.);
      g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime]));
      g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime
        -(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b
        +k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime]));

      /* slip at second order */
      slip = (1.-2*a_prime_over_a*F)*slip + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime)
        -F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime);

      /* second-order correction to shear */
      shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
    }
  }

  /** - ---> add only the most important 2nd order terms */
  if (ppr->tight_coupling_approximation == (int)compromise_CLASS) {

    /* slip at second order (only leading second-order terms) */
    slip = (1.-2.*a_prime_over_a*F)*slip + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b));

    /* second-order correction to shear */
    shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime);
  }

  /** - ---> store tight-coupling values of photon shear and its derivative */
  ppw->tca_shear_g = shear_g;
  ppw->tca_slip = slip;

  return _SUCCESS_;
}
/**
* Compute the density delta and velocity theta of photons and
* ultra-relativistic neutrinos in the radiation streaming
* approximation
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to perturbation structure
* @param k Input: wavenumber
* @param y Input: vector of perturbations
* @param a_prime_over_a Input: a'/a
* @param pvecthermo Input: vector of thermodynamics quantites
* @param ppw Input/Output: in input, fixed parameters (e.g. indices); in output, delta and theta
* @param error_message Output: error message
*/
int perturb_rsa_delta_and_theta(
                                struct precision * ppr,
                                struct background * pba,
                                struct thermo * pth,
                                struct perturbs * ppt,
                                double k,
                                double * y,
                                double a_prime_over_a,
                                double * pvecthermo,
                                struct perturb_workspace * ppw
                                ) {
  /** Summary: computes the photon and (if present) ultra-relativistic
      neutrino density and velocity perturbations in the radiation streaming
      approximation (RSA), stores them in ppw->rsa_{delta,theta}_{g,ur}, and
      updates the total delta_rho and rho_plus_p_theta accordingly. */

  /* - define local variables */
  double k2;

  k2 = k*k;

  /* this function is only meaningful while the RSA is switched on;
     fixed argument order of class_test: (condition, error_message, message) */
  class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_off,
             ppt->error_message,
             "this function should not have been called now, bug was introduced");

  // formulas below TBC for curvature

  /* newtonian gauge */
  if (ppt->gauge == newtonian) {

    if (ppr->radiation_streaming_approximation == rsa_null) {
      ppw->rsa_delta_g = 0.;
      ppw->rsa_theta_g = 0.;
    }
    else {
      ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi];
      ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
    }

    /* correction accounting for residual scattering during reionization */
    if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

      ppw->rsa_delta_g +=
        -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b];

      ppw->rsa_theta_g +=
        3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b]
               +ppw->pvecthermo[pth->index_th_dkappa]*
               (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                +ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                +k2*y[ppw->pv->index_pt_phi]));
    }

    if (pba->has_ur == _TRUE_) {
      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_ur = 0.;
        ppw->rsa_theta_ur = 0.;
      }
      else {
        ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi];
        ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
      }
    }
  }

  /* synchronous gauge */
  if (ppt->gauge == synchronous) {

    if (ppr->radiation_streaming_approximation == rsa_null) {
      ppw->rsa_delta_g = 0.;
      ppw->rsa_theta_g = 0.;
    }
    else {
      ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                -k2*y[ppw->pv->index_pt_eta]);
      ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
    }

    /* correction accounting for residual scattering during reionization */
    if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) {

      ppw->rsa_delta_g +=
        -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]);

      ppw->rsa_theta_g +=
        3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*
               (y[ppw->pv->index_pt_theta_b]
                +0.5*ppw->pvecmetric[ppw->index_mt_h_prime])
               +ppw->pvecthermo[pth->index_th_dkappa]*
               (-a_prime_over_a*y[ppw->pv->index_pt_theta_b]
                + ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b]
                -a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                +k2*y[ppw->pv->index_pt_eta]));
    }

    if (pba->has_ur == _TRUE_) {
      if (ppr->radiation_streaming_approximation == rsa_null) {
        ppw->rsa_delta_ur = 0.;
        ppw->rsa_theta_ur = 0.;
      }
      else {
        ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                   -k2*y[ppw->pv->index_pt_eta]);
        ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
      }
    }
  }

  /* update total delta and theta given rsa approximation results */
  ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_delta_g;
  ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g;

  if (pba->has_ur == _TRUE_) {
    ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_delta_ur;
    ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur;
  }

  return _SUCCESS_;
}
/**
* Compute the density delta and velocity theta of interacting dark
* radiation in its streaming approximation
*
* @param ppr Input: pointer to precision structure
* @param pba Input: pointer to background structure
* @param pth Input: pointer to thermodynamics structure
* @param ppt Input: pointer to perturbation structure
* @param k Input: wavenumber
* @param y Input: vector of perturbations
* @param a_prime_over_a Input: a'/a
* @param pvecthermo Input: vector of thermodynamics quantites
* @param ppw Input/Output: in input, fixed parameters (e.g. indices); in output, delta and theta
* @param error_message Output: error message
*/
int perturb_rsa_idr_delta_and_theta(
                                    struct precision * ppr,
                                    struct background * pba,
                                    struct thermo * pth,
                                    struct perturbs * ppt,
                                    double k,
                                    double * y,
                                    double a_prime_over_a,
                                    double * pvecthermo,
                                    struct perturb_workspace * ppw
                                    ) {
  /** Summary: in the streaming approximation for interacting dark radiation,
      computes its density and velocity perturbations and stores them in
      ppw->rsa_delta_idr and ppw->rsa_theta_idr. Only acts while the
      idr streaming approximation is switched on. */

  double wavenumber_sq = k*k;

  // formulas below TBC for curvature

  if (ppw->approx[ppw->index_ap_rsa_idr] == (int)rsa_idr_on) {

    if (ppt->gauge == newtonian) {
      /* newtonian gauge expressions */
      ppw->rsa_delta_idr = -4.*y[ppw->pv->index_pt_phi];
      ppw->rsa_theta_idr = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime];
    }
    else if (ppt->gauge == synchronous) {
      /* synchronous gauge expressions */
      ppw->rsa_delta_idr = 4./wavenumber_sq*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime]
                                             -wavenumber_sq*y[ppw->pv->index_pt_eta]);
      ppw->rsa_theta_idr = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime];
    }
  }

  return _SUCCESS_;
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////
/* GDM_CLASS : new set of GDM-specific functions */
///////////////////////////////////////////////////
/** The functions cs2_gdm_of_a_and_k and cv2_gdm_of_a_and_k below compute
the GDM sound speed and viscosity for the different GDM parametrisations,
together with the helper functions they need. See the comments in
GDM_explanatory.ini for how to control these functions from the parameter
files.
*/
// helper for the smooth_bins_gdm option: blends two adjacent cs2/cv2 bin values
double c2_piece(double lnap,
double c11,
double c12) {
return (c11 + c12 + erf(lnap)*(c12 - c11))/2.;
}
// use this function only for the case pixel_params_fluid. It calculates either cs2 or cv2 for the pixel case.
/* Evaluate a time-binned ("pixel") quantity (either cs2 or cv2) at scale
   factor a. With smooth_bins_gdm on, adjacent bin values are blended with
   c2_piece around each bin boundary; otherwise the bin value is returned
   as a step function. Returns 0 if a falls outside every bin. */
double twoD_pixel(struct background *pba,
                  double a,
                  double k, // unused dummy argument (kept for a uniform call signature)
                  double c_values_gdm[_MAX_NUMBER_OF_TIME_BINS_]) {

  double a_rel = a / pba->a_today;
  double previous_time=0.;
  double twoD=0;
  int i;

  /*--> smooth bins case */
  if (pba->smooth_bins_gdm == _TRUE_) {

    double timetable[pba->time_bins_num_gdm]; //stores the stitching times
    double timeratios[pba->time_bins_num_gdm]; //needed for the transition width awidth
    double awidth;
    double lnap; // ln((a/ atrans)^awidth), the arguments of the error function

    //calculate the geometric mean of pixel centers (=algebraic mean for lna)
    //because w_piece and rho_piece are stitched together at those times.
    for (i=0; i < pba->time_bins_num_gdm -1; i++) {
      timetable[i]= sqrt(pba->time_values_gdm[i]*pba->time_values_gdm[i+1]);
      /* NOTE(review): this overwrite is deliberately placed INSIDE the loop:
         when i reaches time_bins_num_gdm-2, timeratios below must use the
         final bin end, not the geometric mean. Hoisting it out of the loop
         would change timeratios[time_bins_num_gdm-2]. */
      timetable[pba->time_bins_num_gdm-2]=pba->time_values_gdm[pba->time_bins_num_gdm-1]; //replace the last entry by the final bin end
      timeratios[i]=log(timetable[i]/pba->time_values_gdm[i]);
    }

    //determine the transition width using the smallest logarithmic bin width and the external fudge parameter time_transition_width_gdm
    awidth=pba->time_transition_width_gdm/min_arr(timeratios,pba->time_bins_num_gdm-1);

    //stitch pieces together
    for (i=0; i < pba->time_bins_num_gdm -1; i++) { //check in which stitching region the time a is
      if ((previous_time < a_rel) && (a_rel <= timetable[i])) {
        lnap = awidth*log(a_rel/pba->time_values_gdm[i]);
        // the cx1 is earlier than cx2. And c1x is at smaller k than c2x.
        twoD = c2_piece(lnap,
                        c_values_gdm[i],
                        c_values_gdm[i+1]);
        break; //this breaks out of the time-loop only (i-loop)
      }
      else {
        previous_time = timetable[i];
      }
    }
  }

  /*--> sharp bins case */
  else {
    for (i=0; i < pba->time_bins_num_gdm; i++) { // check in which pixel a is
      if((previous_time < a_rel) && (a_rel <= pba->time_values_gdm[i])) {
        twoD = c_values_gdm[i];
        break; //this breaks out of the time-loop only (i-loop)
      }
      else {
        previous_time = pba->time_values_gdm[i];
      }
    }
  }

  return twoD;
}
// Add here the definitions of GDM sound speed and viscosity
/* GDM sound speed cs2 as a function of scale factor a and wavenumber k,
   for the parametrisations currently implemented. */
double cs2_gdm_of_a_and_k(struct background *pba,
                          double a,
                          double k,
                          struct perturb_workspace * ppw) {

  double cs2 = 0.;

  /* time-only binned GDM: read cs2 from the pixel table */
  if (pba->type_gdm == time_only_bins_gdm) {
    cs2 = twoD_pixel(pba, a, k, pba->cs2_values_gdm);
  }

  /* optional k^2-dependent sound speed built on the adiabatic sound speed */
  if (pba->k2_cs2_gdm == _TRUE_) {
    double k_pivot = 0.01; /* hard coded: specifying it manually is unlikely to help */
    double ca2_gdm = ppw->pvecback[pba->index_bg_ca2_gdm];
    cs2 = ca2_gdm + k*k/k_pivot/k_pivot*cs2;
    /* clamp to the physical range: cap superluminal values at small scales
       at 1, and negative values at large scales at 0. A negative value can
       only arise through ca2, which may be negative even for non-negative w. */
    if (cs2 > 1.) {
      cs2 = 1.;
    }
    else if (cs2 < 0.) {
      cs2 = 0.;
    }
    /* NOTE(review): one could add a stability guard here, e.g.
       class_test(ca2_gdm < 0, pba->error_message, "ca2 must be non-negative
       for stable GDM perturbations with a k2-dependent sound speed");
       (the condition previously suggested in a comment, ca2_gdm >= 0,
       was inverted for class_test semantics). */
  }

  return cs2;
}
// cv2_gdm_of_a_and_k is for time_only_bins_gdm is an exact copy of cs2_gdm_of_a_and_k. So there should be a nicer way to write down both functions.
/* GDM viscosity cv2 as a function of scale factor a and wavenumber k.
   For time_only_bins_gdm this mirrors cs2_gdm_of_a_and_k exactly; a shared
   helper for both would avoid the duplication. */
double cv2_gdm_of_a_and_k(struct background *pba,
                          double a,
                          double k) {
  /* time-only binned GDM: read cv2 from the pixel table */
  if (pba->type_gdm == time_only_bins_gdm) {
    return twoD_pixel(pba, a, k, pba->cv2_values_gdm);
  }
  /* all other parametrisations: no viscosity */
  return 0;
}
|
nvector_openmpdev.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP DEV implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v)
{
  /* the id is the same for every OpenMPDEV vector, so v is unused */
  return SUNDIALS_NVEC_OPENMPDEV;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length)
{
  N_Vector v;
  N_VectorContent_OpenMPDEV content;

  /* Create an empty vector object */
  v = NULL;
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);

  /* Attach operations */

  /* constructors, destructors, and utility operations */
  v->ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV;
  v->ops->nvclone = N_VClone_OpenMPDEV;
  v->ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV;
  v->ops->nvdestroy = N_VDestroy_OpenMPDEV;
  v->ops->nvspace = N_VSpace_OpenMPDEV;
  v->ops->nvgetlength = N_VGetLength_OpenMPDEV;

  /* standard vector operations */
  v->ops->nvlinearsum = N_VLinearSum_OpenMPDEV;
  v->ops->nvconst = N_VConst_OpenMPDEV;
  v->ops->nvprod = N_VProd_OpenMPDEV;
  v->ops->nvdiv = N_VDiv_OpenMPDEV;
  v->ops->nvscale = N_VScale_OpenMPDEV;
  v->ops->nvabs = N_VAbs_OpenMPDEV;
  v->ops->nvinv = N_VInv_OpenMPDEV;
  v->ops->nvaddconst = N_VAddConst_OpenMPDEV;
  v->ops->nvdotprod = N_VDotProd_OpenMPDEV;
  v->ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV;
  v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV;
  v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV;
  v->ops->nvmin = N_VMin_OpenMPDEV;
  v->ops->nvwl2norm = N_VWL2Norm_OpenMPDEV;
  v->ops->nvl1norm = N_VL1Norm_OpenMPDEV;
  v->ops->nvcompare = N_VCompare_OpenMPDEV;
  v->ops->nvinvtest = N_VInvTest_OpenMPDEV;
  v->ops->nvconstrmask = N_VConstrMask_OpenMPDEV;
  v->ops->nvminquotient = N_VMinQuotient_OpenMPDEV;

  /* fused and vector array operations are disabled (NULL) by default */

  /* local reduction operations: in this (non-MPI) vector the "local"
     variants are just the global ones */
  v->ops->nvdotprodlocal = N_VDotProd_OpenMPDEV;
  v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMPDEV;
  v->ops->nvminlocal = N_VMin_OpenMPDEV;
  v->ops->nvl1normlocal = N_VL1Norm_OpenMPDEV;
  v->ops->nvinvtestlocal = N_VInvTest_OpenMPDEV;
  v->ops->nvconstrmasklocal = N_VConstrMask_OpenMPDEV;
  v->ops->nvminquotientlocal = N_VMinQuotient_OpenMPDEV;
  v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMPDEV;
  v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMPDEV;

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }

  /* Attach content */
  v->content = content;

  /* Initialize content: no data attached yet, so the vector owns nothing */
  content->length = length;
  content->own_data = SUNFALSE;
  content->host_data = NULL;
  content->dev_data = NULL;

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
/* Create a new vector of the given length, allocating both the host and
   the device data arrays. Returns NULL on any allocation failure. */
N_Vector N_VNew_OpenMPDEV(sunindextype length)
{
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  int dev;

  v = N_VNewEmpty_OpenMPDEV(length);
  if (v == NULL) return(NULL);

  /* Create data */
  if (length > 0) {

    /* Update ownership: both arrays will be freed by N_VDestroy */
    NV_OWN_DATA_OMPDEV(v) = SUNTRUE;

    /* Allocate memory on host */
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy(v); return(NULL); }

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) {
      /* the host buffer is not attached to v yet, so N_VDestroy would not
         release it: free it explicitly to avoid a memory leak */
      free(data);
      N_VDestroy(v);
      return(NULL);
    }

    /* Attach data */
    NV_DATA_HOST_OMPDEV(v) = data;
    NV_DATA_DEV_OMPDEV(v)  = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
/* Create a vector wrapping user-supplied host and device data arrays.
   The vector does NOT take ownership of either array; the caller remains
   responsible for freeing them. Returns NULL if either pointer is NULL or
   if the empty vector cannot be created. */
N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata,
                           realtype *d_vdata)
{
  N_Vector v;

  if (h_vdata == NULL || d_vdata == NULL) return(NULL);

  v = N_VNewEmpty_OpenMPDEV(length);
  if (v == NULL) return(NULL);

  if (length > 0) {
    /* Attach data without taking ownership.
       (The previous version also queried the device and host ids here,
       but never used them — dead code removed.) */
    NV_OWN_DATA_OMPDEV(v)  = SUNFALSE;
    NV_DATA_HOST_OMPDEV(v) = h_vdata;
    NV_DATA_DEV_OMPDEV(v)  = d_vdata;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
/* Create an array of `count` new vectors cloned from w. On failure, every
   vector created so far is destroyed and NULL is returned. */
N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w)
{
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if (vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = N_VClone_OpenMPDEV(w);
    if (vs[j] == NULL) {
      /* j vectors (indices 0..j-1) were successfully created; the previous
         code passed j-1 here and leaked vs[j-1] */
      N_VDestroyVectorArray_OpenMPDEV(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
/* Create an array of `count` new empty (no data) vectors cloned from w.
   On failure, every vector created so far is destroyed and NULL returned. */
N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w)
{
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if (vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = N_VCloneEmpty_OpenMPDEV(w);
    if (vs[j] == NULL) {
      /* j vectors (indices 0..j-1) were successfully created; the previous
         code passed j-1 here and leaked vs[j-1] */
      N_VDestroyVectorArray_OpenMPDEV(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMPDEV
*/
/* Destroy the first `count` vectors of vs and free the array itself. */
void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count)
{
  int i;

  for (i = 0; i < count; i++) {
    N_VDestroy_OpenMPDEV(vs[i]);
  }
  free(vs);
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMPDEV(N_Vector v)
{
  /* number of realtype elements in the vector */
  return NV_LENGTH_OMPDEV(v);
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the host.
*/
realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v)
{
  /* raw pointer to the host copy of the data; may be NULL for an empty vector */
  return((realtype *) NV_DATA_HOST_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the device.
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
  /* raw pointer to the device copy of the data; may be NULL for an empty vector */
  return((realtype *) NV_DATA_DEV_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMPDEV(N_Vector x)
{
  /* convenience wrapper: print the host copy of x to stdout */
  N_VPrintFile_OpenMPDEV(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
/* Print the vector contents, one element per line, to outfile.
   NOTE(review): this prints the HOST mirror of the data only; call
   N_VCopyFromDevice_OpenMPDEV first if the device copy is more recent. */
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
  sunindextype i;
  sunindextype n = NV_LENGTH_OMPDEV(x);
  realtype *host_data = NV_DATA_HOST_OMPDEV(x);

  for (i = 0; i < n; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    fprintf(outfile, "%11.8Lg\n", host_data[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    fprintf(outfile, "%11.8g\n", host_data[i]);
#else
    fprintf(outfile, "%11.8g\n", host_data[i]);
#endif
  }
  fprintf(outfile, "\n");
}
/* ----------------------------------------------------------------------------
* Function to copy host array into device array
*/
/* Copy the full host data array of x into its device data array. */
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
  sunindextype n = NV_LENGTH_OMPDEV(x);
  realtype *host_ptr = NV_DATA_HOST_OMPDEV(x);
  realtype *dev_ptr  = NV_DATA_DEV_OMPDEV(x);

  /* device and host identifiers for the transfer */
  int dev_id  = omp_get_default_device();
  int host_id = omp_get_initial_device();

  /* host -> device transfer (zero offsets, whole array) */
  omp_target_memcpy(dev_ptr, host_ptr, sizeof(realtype) * n, 0, 0,
                    dev_id, host_id);
}
/* ----------------------------------------------------------------------------
* Function to copy device array into host array
*/
/* Copy the full device data array of x back into its host data array. */
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
  sunindextype n = NV_LENGTH_OMPDEV(x);
  realtype *host_ptr = NV_DATA_HOST_OMPDEV(x);
  realtype *dev_ptr  = NV_DATA_DEV_OMPDEV(x);

  /* device and host identifiers for the transfer */
  int dev_id  = omp_get_default_device();
  int host_id = omp_get_initial_device();

  /* device -> host transfer (zero offsets, whole array) */
  omp_target_memcpy(host_ptr, dev_ptr, sizeof(realtype) * n, 0, 0,
                    host_id, dev_id);
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
/* Create a new vector with the same length and operations as w but with
   no data arrays attached. Returns NULL on failure. */
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  N_VectorContent_OpenMPDEV content;

  if (w == NULL) return(NULL);

  /* new empty vector shell */
  v = N_VNewEmpty();
  if (v == NULL) return(NULL);

  /* copy the operations table from w */
  if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }

  /* allocate and attach the content structure */
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof *content);
  if (content == NULL) { N_VDestroy(v); return(NULL); }
  v->content = content;

  /* same length as w; no data, so nothing is owned */
  content->length    = NV_LENGTH_OMPDEV(w);
  content->own_data  = SUNFALSE;
  content->host_data = NULL;
  content->dev_data  = NULL;

  return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
/* Create a new vector of the same length as w with freshly allocated host
   and device data arrays (contents are NOT copied). Returns NULL on any
   allocation failure. */
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  sunindextype length;
  int dev;

  v = N_VCloneEmpty_OpenMPDEV(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_OMPDEV(w);

  /* Create data */
  if (length > 0) {

    /* Update ownership flag: both arrays will be freed by N_VDestroy */
    NV_OWN_DATA_OMPDEV(v) = SUNTRUE;

    /* Allocate memory on host */
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy(v); return(NULL); }

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) {
      /* the host buffer is not attached to v yet, so N_VDestroy would not
         release it: free it explicitly to avoid a memory leak */
      free(data);
      N_VDestroy(v);
      return(NULL);
    }

    /* Attach data */
    NV_DATA_HOST_OMPDEV(v) = data;
    NV_DATA_DEV_OMPDEV(v)  = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
 * Destroy vector and free vector memory
 */
void N_VDestroy_OpenMPDEV(N_Vector v)
{
  if (v == NULL) return;

  /* Release the content structure, including any owned data arrays */
  if (v->content != NULL) {
    if (NV_OWN_DATA_OMPDEV(v)) {
      /* host-side buffer */
      if (NV_DATA_HOST_OMPDEV(v) != NULL) {
        free(NV_DATA_HOST_OMPDEV(v));
        NV_DATA_HOST_OMPDEV(v) = NULL;
      }
      /* device-side buffer */
      if (NV_DATA_DEV_OMPDEV(v) != NULL) {
        omp_target_free(NV_DATA_DEV_OMPDEV(v), omp_get_default_device());
        NV_DATA_DEV_OMPDEV(v) = NULL;
      }
    }
    free(v->content);
    v->content = NULL;
  }

  /* Release the operations table and the vector itself */
  if (v->ops != NULL) {
    free(v->ops);
    v->ops = NULL;
  }
  free(v);
}
/* ----------------------------------------------------------------------------
 * Get storage requirement for N_Vector
 */
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  *lrw = NV_LENGTH_OMPDEV(v);  /* realtype words: one per vector element */
  *liw = 1;                    /* integer words: the stored length */
}
/* ----------------------------------------------------------------------------
 * Compute linear combination z[i] = a*x[i]+b*y[i]
 *
 * Special coefficient pairs are dispatched to dedicated helper kernels
 * (Vaxpy_OpenMPDEV, VSum_OpenMPDEV, VDiff_OpenMPDEV, VLin1/VLin2,
 * VScaleSum/VScaleDiff); only the fully general case launches the target
 * region at the end. The dispatch order matters: the in-place axpy cases
 * must be recognized before the value-based ones.
 */
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  sunindextype i, N;
  realtype c, *xd_dev, *yd_dev, *zd_dev;
  N_Vector v1, v2;
  booleantype test;
  int dev;

  xd_dev = yd_dev = zd_dev = NULL;

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMPDEV(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMPDEV(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMPDEV(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  /* `test` records which operand carries the -1 so the operands can be
     swapped into subtraction order (positive minus negative) below */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMPDEV(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMPDEV(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMPDEV(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* general case: offload the elementwise combination to the device;
     the device arrays are passed through is_device_ptr (already resident) */
#pragma omp target map(to:N,a,b) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);

  return;
}
/* ----------------------------------------------------------------------------
 * Assigns constant value to all vector elements, z[i] = c
 */
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(z);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

  /* fill the device-resident array with c */
#pragma omp target map(to:n,c) is_device_ptr(zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++)
    zdata[k] = c;
}
/* ----------------------------------------------------------------------------
 * Compute componentwise product z[i] = x[i]*y[i]
 */
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *ydata = NV_DATA_DEV_OMPDEV(y);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++)
    zdata[k] = xdata[k] * ydata[k];
}
/* ----------------------------------------------------------------------------
 * Compute componentwise division z[i] = x[i]/y[i]
 *
 * No zero check on y -- caller is responsible (use N_VInvTest to probe).
 */
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *ydata = NV_DATA_DEV_OMPDEV(y);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++)
    zdata[k] = xdata[k] / ydata[k];
}
/* ----------------------------------------------------------------------------
 * Compute scaler multiplication z[i] = c*x[i]
 */
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype k;

  /* BLAS usage: scale x <- cx (in place) */
  if (z == x) {
    VScaleBy_OpenMPDEV(c, x);
    return;
  }

  if (c == ONE) {
    VCopy_OpenMPDEV(x, z);          /* plain copy */
  } else if (c == -ONE) {
    VNeg_OpenMPDEV(x, z);           /* negation */
  } else {
    /* general scaling on the device */
    sunindextype n  = NV_LENGTH_OMPDEV(x);
    realtype *xdata = NV_DATA_DEV_OMPDEV(x);
    realtype *zdata = NV_DATA_DEV_OMPDEV(z);
    int dev_id      = omp_get_default_device();

#pragma omp target map(to:n,c) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < n; k++)
      zdata[k] = c * xdata[k];
  }
}
/* ----------------------------------------------------------------------------
 * Compute absolute value of vector components z[i] = SUNRabs(x[i])
 */
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++)
    zdata[k] = SUNRabs(xdata[k]);
}
/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = 1 / x[i]
 *
 * No zero check on x -- use N_VInvTest for the checked variant.
 */
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++)
    zdata[k] = ONE / xdata[k];
}
/* ----------------------------------------------------------------------------
 * Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
 */
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n,b) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++)
    zdata[k] = xdata[k] + b;
}
/* ----------------------------------------------------------------------------
 * Computes the dot product of two vectors, a = sum(x[i]*y[i])
 */
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  sunindextype k;
  realtype dot    = ZERO;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *ydata = NV_DATA_DEV_OMPDEV(y);
  int dev_id      = omp_get_default_device();

  /* sum-reduce the elementwise products on the device */
#pragma omp target map(to:n) map(tofrom:dot) is_device_ptr(xdata, ydata) device(dev_id)
#pragma omp teams distribute parallel for reduction(+:dot) schedule(static, 1)
  for (k = 0; k < n; k++)
    dot += xdata[k] * ydata[k];

  return(dot);
}
/* ----------------------------------------------------------------------------
 * Computes max norm of a vector
 */
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  sunindextype k;
  realtype maxval = ZERO;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  int dev_id      = omp_get_default_device();

  /* max-reduce the absolute values on the device */
#pragma omp target map(to:n) map(tofrom:maxval) is_device_ptr(xdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(max:maxval) schedule(static, 1)
  for (k = 0; k < n; k++)
    maxval = SUNMAX(SUNRabs(xdata[k]), maxval);

  return(maxval);
}
/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a vector
 */
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  /* sqrt of the mean weighted square sum */
  realtype sqr_sum = N_VWSqrSumLocal_OpenMPDEV(x, w);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
 * Computes weighted root mean square norm of a masked vector
 */
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  /* sqrt of the mean masked weighted square sum */
  realtype sqr_sum = N_VWSqrSumMaskLocal_OpenMPDEV(x, w, id);
  return(SUNRsqrt(sqr_sum / (NV_LENGTH_OMPDEV(x))));
}
/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a vector: sum((x[i]*w[i])^2)
 */
realtype N_VWSqrSumLocal_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype k;
  realtype acc    = ZERO;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *wdata = NV_DATA_DEV_OMPDEV(w);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) map(tofrom:acc) is_device_ptr(xdata, wdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(+:acc) schedule(static, 1)
  for (k = 0; k < n; k++)
    acc += SUNSQR(xdata[k] * wdata[k]);

  return(acc);
}
/* ----------------------------------------------------------------------------
 * Computes weighted square sum of a masked vector: entries with id[i] <= 0
 * are excluded from the sum.
 */
realtype N_VWSqrSumMaskLocal_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  sunindextype k;
  realtype acc     = ZERO;
  sunindextype n   = NV_LENGTH_OMPDEV(x);
  realtype *xdata  = NV_DATA_DEV_OMPDEV(x);
  realtype *wdata  = NV_DATA_DEV_OMPDEV(w);
  realtype *iddata = NV_DATA_DEV_OMPDEV(id);
  int dev_id       = omp_get_default_device();

#pragma omp target map(to:n) map(tofrom:acc) is_device_ptr(xdata, wdata, iddata) device(dev_id)
#pragma omp teams distribute parallel for reduction(+:acc) schedule(static, 1)
  for (k = 0; k < n; k++) {
    if (iddata[k] > ZERO)
      acc += SUNSQR(xdata[k] * wdata[k]);
  }

  return(acc);
}
/* ----------------------------------------------------------------------------
 * Finds the minimun component of a vector
 */
realtype N_VMin_OpenMPDEV(N_Vector x)
{
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;

  xd_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* `min` is only produced on the device, hence map(from:). A single team
     is requested so the seed assignment `min = xd_dev[0]` executes exactly
     once before the distributed min-reduction over the remaining entries
     (with multiple teams the seed would race with the reduction). */
#pragma omp target map(to:N) map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++) {
      min = SUNMIN(xd_dev[i], min);
    }
  }

  return(min);
}
/* ----------------------------------------------------------------------------
 * Computes weighted L2 norm of a vector: sqrt(sum((x[i]*w[i])^2))
 */
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  sunindextype k;
  realtype acc    = ZERO;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *wdata = NV_DATA_DEV_OMPDEV(w);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) map(tofrom:acc) is_device_ptr(xdata, wdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(+:acc) schedule(static, 1)
  for (k = 0; k < n; k++)
    acc += SUNSQR(xdata[k] * wdata[k]);

  return(SUNRsqrt(acc));
}
/* ----------------------------------------------------------------------------
 * Computes L1 norm of a vector: sum(|x[i]|)
 */
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  sunindextype k;
  realtype acc    = ZERO;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n) map(tofrom:acc) is_device_ptr(xdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(+:acc) schedule(static, 1)
  for (k = 0; k < n; k++)
    acc += SUNRabs(xdata[k]);

  return(acc);
}
/* ----------------------------------------------------------------------------
 * Compare vector component values to a scaler:
 * z[i] = 1 if |x[i]| >= c, else 0
 */
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  sunindextype k;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

#pragma omp target map(to:n,c) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < n; k++) {
    if (SUNRabs(xdata[k]) >= c)
      zdata[k] = ONE;
    else
      zdata[k] = ZERO;
  }
}
/* ----------------------------------------------------------------------------
 * Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
 *
 * Returns SUNFALSE if any x[i] is zero (those z[i] are left untouched),
 * SUNTRUE otherwise.
 */
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k;
  realtype flag;
  sunindextype n  = NV_LENGTH_OMPDEV(x);
  realtype *xdata = NV_DATA_DEV_OMPDEV(x);
  realtype *zdata = NV_DATA_DEV_OMPDEV(z);
  int dev_id      = omp_get_default_device();

  /* flag becomes ONE (via max reduction) iff a zero entry is encountered */
  flag = ZERO;
#pragma omp target map(to:n) map(tofrom:flag) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(max:flag) schedule(static, 1)
  for (k = 0; k < n; k++) {
    if (xdata[k] == ZERO)
      flag = ONE;
    else
      zdata[k] = ONE/xdata[k];
  }

  return (flag > ZERO) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
 * Compute constraint mask of a vector
 *
 * Constraint encoding in c (per the N_Vector convention visible in the
 * checks below): |c[i]| == 2 requires x[i]*c[i] > 0 (strict sign match),
 * |c[i]| == 1 requires x[i]*c[i] >= 0, and c[i] == 0 is unconstrained.
 * m[i] is set to ONE where the constraint is violated, ZERO otherwise.
 * Returns SUNTRUE iff all constraints are satisfied.
 */
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd_dev, *xd_dev, *md_dev;
  int dev;

  cd_dev = xd_dev = md_dev = NULL;

  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  cd_dev = NV_DATA_DEV_OMPDEV(c);
  md_dev = NV_DATA_DEV_OMPDEV(m);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* temp starts at ONE and is min-reduced: any violation drags it to ZERO */
  temp = ONE;

#pragma omp target map(to:N) map(tofrom:temp) is_device_ptr(xd_dev, cd_dev, md_dev) device(dev)
#pragma omp teams distribute parallel for reduction(min:temp) schedule(static, 1)
  for (i = 0; i < N; i++) {
    md_dev[i] = ZERO;

    /* unconstrained entry */
    if (cd_dev[i] == ZERO) continue;

    /* |c| == 2: strict sign requirement */
    if (cd_dev[i] > ONEPT5 || cd_dev[i] < -ONEPT5) {
      if ( xd_dev[i]*cd_dev[i] <= ZERO) { temp = ZERO; md_dev[i] = ONE; }
      continue;
    }

    /* |c| == 1: non-strict sign requirement */
    if ( cd_dev[i] > HALF || cd_dev[i] < -HALF) {
      if (xd_dev[i]*cd_dev[i] < ZERO ) { temp = ZERO; md_dev[i] = ONE; }
    }
  }

  if (temp == ONE) return (SUNTRUE);
  else return(SUNFALSE);
}
/* ----------------------------------------------------------------------------
 * Compute minimum componentwise quotient min(num[i]/denom[i]), skipping
 * entries with denom[i] == 0. Returns BIG_REAL if no entry qualifies.
 */
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype k;
  realtype quotmin;
  sunindextype n  = NV_LENGTH_OMPDEV(num);
  realtype *numer = NV_DATA_DEV_OMPDEV(num);
  realtype *denm  = NV_DATA_DEV_OMPDEV(denom);
  int dev_id      = omp_get_default_device();

  quotmin = BIG_REAL;   /* sentinel when every denominator is zero */
#pragma omp target map(to:n) map(tofrom:quotmin) is_device_ptr(numer, denm) device(dev_id)
#pragma omp teams distribute parallel for reduction(min:quotmin) schedule(static, 1)
  for (k = 0; k < n; k++) {
    if (denm[k] != ZERO)
      quotmin = SUNMIN(numer[k]/denm[k], quotmin);
  }

  return(quotmin);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
 * Fused operation: z = sum{ c[i] * X[i] }, i = 0,...,nvec-1, with in-place
 * fast paths when z aliases X[0].
 *
 * Fixes relative to the previous revision:
 *  - the two target constructs in the `X[0] == z` branch were missing the
 *    device(dev) clause used by every other target region in this file;
 *  - `teams distribute` was followed by a compound statement instead of the
 *    loop nest the OpenMP spec requires -- the braces are removed so the
 *    outer loop binds to the directive;
 *  - the work variables xd_dev (per team) and to_add (per thread) are now
 *    privatized to avoid data races between concurrently executing teams
 *    and threads; the accumulation into zd_dev remains atomic.
 */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int          i, dev;
  realtype     to_add;  /* temporary holding the term added atomically */
  sunindextype j, N;
  realtype*    zd_dev=NULL;
  realtype*    xd_dev=NULL;
  realtype**   xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N      = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
    is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
    /* first scale z in place by c[0] */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
    is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static,1)
    for (j=0; j<N; j++)
      zd_dev[j] *= c[0];

    /* then accumulate the remaining terms */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
    is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */
  /* initialize z with the first term ... */
  xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
#pragma omp target map(to:N,c[:nvec]) \
    is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (j=0; j<N; j++)
    zd_dev[j] = c[0] * xd_dev[j];

  /* ... then accumulate the remaining terms */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
    is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute private(xd_dev)
  for (i=1; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
    for (j=0; j<N; j++) {
      to_add = c[i] * xd_dev[j];
#pragma omp atomic
      zd_dev[j] += to_add;
    }
  }
  free(xd_dev_ptrs);
  return(0);
}
/* Fused operation: Z[i] = a[i]*x + Y[i] for i = 0,...,nvec-1, computed on
   the device; when Y == Z the update is performed in place (Y[i] += a[i]*x). */
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /*
   * Y[i][j] += a[i] * x[j]
   */
  /* NOTE(review): yd_dev is assigned inside the distribute loop without a
     private clause, and the directive is followed by a compound statement
     rather than the loop nest the OpenMP spec associates with
     `teams distribute` -- confirm against the targeted compiler. */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += a[i] * xd_dev[j];
      }
    }
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]
   */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
    }
  }
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Fused operation: dotprods[i] = <x, Y[i]> for i = 0,...,nvec-1, computed
   on the device. Returns 0 on success, -1 for an invalid vector count. */
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize dot products (mapped tofrom and accumulated with += below) */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* compute multiple dot products */
  /* NOTE(review): `yd_dev` and `sum` are assigned inside the distribute
     loop but are not privatized per team -- looks like a cross-team data
     race if teams execute concurrently; confirm against the OpenMP spec. */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += xd_dev[j] * yd_dev[j];
    dotprods[i] += sum;
  }

  free(yd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/* Vector-array operation: Z[i] = a*X[i] + b*Y[i] for i = 0,...,nvec-1.
   Mirrors the coefficient dispatch of N_VLinearSum; only the fully general
   case launches the target region at the end. */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
                                      realtype a, N_Vector* X,
                                      realtype b, N_Vector* Y,
                                      N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;
  realtype c;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));

  /* Cases:                    */
  /*   (1) a == 1.0, b = -1.0, */
  /*   (2) a == -1.0, b == 1.0 */
  /* `test` records which operand carries the -1 so the arrays can be
     swapped into subtraction order below */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }

  /* Cases:                                                  */
  /*   (1) a == 1.0, b == other or 0.0,                      */
  /*   (2) a == other or 0.0, b == 1.0                       */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Cases:                     */
  /*   (1) a == -1.0, b != 1.0, */
  /*   (2) a != 1.0, b == -1.0  */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Case: a == b                                                         */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Do all cases not handled above:                               */
  /*   (1) a == other, b == 0.0 - user should have called N_VScale */
  /*   (2) a == 0.0, b == other - user should have called N_VScale */
  /*   (3) a,b == other, a !=b, a != -b                            */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* compute linear sum for each vector pair in vector arrays */
  /* NOTE(review): the per-iteration pointer assignments below are not
     privatized per team, and the compound statement after `teams
     distribute` is not the loop nest the OpenMP spec expects -- confirm
     against the targeted compiler. */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Vector-array operation: Z[i] = c[i]*X[i] for i = 0,...,nvec-1; when
   X == Z the scaling is performed in place. */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }

  /*
   * X[i] *= c[i]
   */
  /* NOTE(review): xd_dev is assigned inside the distribute loop without a
     private clause, and the compound statement following `teams distribute`
     is not the loop nest the OpenMP spec expects -- confirm. */
  if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          xd_dev[j] *= c[i];
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i] = c[i] * X[i]
   */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c[i] * xd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Vector-array operation: set every element of each Z[i] to the constant c. */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get device */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* set each vector in the vector array to a constant */
  /* NOTE(review): zd_dev is assigned inside the distribute loop without a
     private clause, and the compound statement following `teams distribute`
     is not the loop nest the OpenMP spec expects -- confirm. */
#pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c;
    }
  }
  free(zd_dev_ptrs);
  return(0);
}
/* ----------------------------------------------------------------------------
 * Vector-array operation: compute the WRMS norm of each vector,
 *   nrm[i] = sqrt( sum_j (X[i][j]*W[i][j])^2 / N ).
 *
 * Fixes relative to the previous revision:
 *  - `parallel for` (and `teams distribute`) were followed by a compound
 *    statement instead of the loop nest the OpenMP spec requires, which is
 *    a compile error ("for statement expected") -- the stray braces are
 *    removed so each loop binds directly to its directive;
 *  - the work variables xd_dev, wd_dev and sum are privatized per team to
 *    avoid races between teams executing different i iterations.
 */
int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms (mapped tofrom and overwritten below) */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /* compute the WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
  is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, wd_dev, sum)
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    wd_dev = wd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += SUNSQR(xd_dev[j] * wd_dev[j]);
    nrm[i] = SUNRsqrt(sum/N);
  }

  free(wd_dev_ptrs);
  free(xd_dev_ptrs);
  return(0);
}
/* ----------------------------------------------------------------------------
 * Vector-array operation: compute the masked WRMS norm of each vector,
 *   nrm[i] = sqrt( sum_{j : id[j] > 0} (X[i][j]*W[i][j])^2 / N ).
 *
 * Fixes relative to the previous revision:
 *  - `parallel for` (and `teams distribute`) were followed by a compound
 *    statement instead of the loop nest the OpenMP spec requires, which is
 *    a compile error ("for statement expected") -- the stray braces are
 *    removed so each loop binds directly to its directive;
 *  - the work variables xd_dev, wd_dev and sum are privatized per team to
 *    avoid races between teams executing different i iterations.
 */
int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W,
                                         N_Vector id, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype* idd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N = NV_LENGTH_OMPDEV(X[0]);
  idd_dev = NV_DATA_DEV_OMPDEV(id);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms (mapped tofrom and overwritten below) */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);

  /* compute the WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
  is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev)
#pragma omp teams distribute private(xd_dev, wd_dev, sum)
  for (i=0; i<nvec; i++) {
    xd_dev = xd_dev_ptrs[i];
    wd_dev = wd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++) {
      if (idd_dev[j] > ZERO)
        sum += SUNSQR(xd_dev[j] * wd_dev[j]);
    }
    nrm[i] = SUNRsqrt(sum/N);
  }

  free(xd_dev_ptrs);
  free(wd_dev_ptrs);
  return(0);
}
/* For each vector i in the array X and each scaling j in [0,nsum):
     Z[j][i] = a[j]*X[i] + Y[j][i]  (element-wise, on the device).
   When Y == Z the update is performed in place (Y[j][i] += a[j]*X[i]).
   Falls back to simpler kernels for nvec==1 or nsum==1.
   Returns 0 on success, -1 if nvec < 1 or nsum < 1, otherwise the status
   of the delegated operation. */
int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a,
N_Vector* X, N_Vector** Y, N_Vector** Z)
{
int i, j, dev;
sunindextype k, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
int retval;
N_Vector* YY;
N_Vector* ZZ;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
 * Special cases for nvec == 1
 * --------------------------- */
if (nvec == 1) {
/* should have called N_VLinearSum */
if (nsum == 1) {
N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
return(0);
}
/* should have called N_VScaleAddMulti */
/* gather the single column of Y and Z into temporary arrays */
YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
for (j=0; j<nsum; j++) {
YY[j] = Y[j][0];
ZZ[j] = Z[j][0];
}
retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
free(YY);
free(ZZ);
return(retval);
}
/* --------------------------
 * Special cases for nvec > 1
 * -------------------------- */
/* should have called N_VLinearSumVectorArray */
if (nsum == 1) {
retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
return(retval);
}
/* ----------------------------
 * Compute multiple linear sums
 * ---------------------------- */
/* get vector length */
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device;
   yd_dev_ptrs is laid out row-major: entry [i*nsum + j] is vector Y[j][i] */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++) {
for (j=0; j<nsum; j++)
yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
}
/*
 * In-place case (Y == Z):
 *   Y[j][i] += a[j] * X[i]   for each vector i and each sum j
 */
if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
for (j=0; j<nsum; j++) {
yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
yd_dev[k] += a[j] * xd_dev[k];
}
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/* Allocate and store dev pointers to copy to device (same layout as Y) */
zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
for (i=0; i<nvec; i++) {
for (j=0; j<nsum; j++)
zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
}
/*
 * General case:
 *   Z[j][i] = Y[j][i] + a[j] * X[i]   for each vector i and each sum j
 */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
for (j=0; j<nsum; j++) {
yd_dev = yd_dev_ptrs[i*nsum+j];
zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
}
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector j in the array, compute the linear combination
     Z[j] = sum_{i=0}^{nsum-1} c[i] * X[i][j]   (element-wise, on the device).
   Special in-place paths are taken when X[0] == Z.  Falls back to simpler
   kernels for small nvec/nsum.  Returns 0 on success, -1 if nvec < 1 or
   nsum < 1. */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum,
realtype* c,
N_Vector** X,
N_Vector* Z)
{
int i; /* vector arrays index in summation [0,nsum) */
int j; /* vector index in vector array [0,nvec) */
sunindextype k; /* element index in vector [0,N) */
sunindextype N;
realtype* zd_dev=NULL;
realtype* xd_dev=NULL;
realtype** zd_dev_ptrs=NULL;
realtype** xd_dev_ptrs=NULL;
int dev;
realtype* ctmp;
N_Vector* Y;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
 * Special cases for nvec == 1
 * --------------------------- */
if (nvec == 1) {
/* should have called N_VScale */
if (nsum == 1) {
N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
return(0);
}
/* should have called N_VLinearSum */
if (nsum == 2) {
N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
return(0);
}
/* should have called N_VLinearCombination */
/* gather the single column of X into a temporary array */
Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
for (i=0; i<nsum; i++) {
Y[i] = X[i][0];
}
N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
free(Y);
return(0);
}
/* --------------------------
 * Special cases for nvec > 1
 * -------------------------- */
/* should have called N_VScaleVectorArray */
if (nsum == 1) {
/* replicate the single coefficient across all vectors */
ctmp = (realtype*) malloc(nvec * sizeof(realtype));
for (j=0; j<nvec; j++) {
ctmp[j] = c[0];
}
N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
free(ctmp);
return(0);
}
/* should have called N_VLinearSumVectorArray */
if (nsum == 2) {
N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
return(0);
}
/* --------------------------
 * Compute linear combination
 * -------------------------- */
/* get vector length */
N = NV_LENGTH_OMPDEV(Z[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device;
   xd_dev_ptrs is laid out row-major: entry [j*nsum + i] is vector X[i][j] */
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
for (j=0; j<nvec; j++)
zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
for (j=0; j<nvec; j++) {
for (i=0; i<nsum; i++)
xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
}
/*
 * X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
 * (in-place accumulation when the output aliases X[0] and c[0] == 1)
 */
if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (j=0; j<nvec; j++) {
zd_dev = zd_dev_ptrs[j];
for (i=1; i<nsum; i++) {
xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] += c[i] * xd_dev[k];
}
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/*
 * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
 * (in-place: scale X[0][j] first, then accumulate the remaining terms)
 */
if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
{
for (j=0; j<nvec; j++) {
zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] *= c[0];
for (i=1; i<nsum; i++) {
xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] += c[i] * xd_dev[k];
}
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/*
 * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
 * (general case: output is distinct from the inputs)
 */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
{
for (j=0; j<nvec; j++) {
/* scale first vector in the sum into the output vector */
xd_dev = xd_dev_ptrs[j*nsum];
zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] = c[0] * xd_dev[k];
/* scale and sum remaining vectors into the output vector */
for (i=1; i<nsum; i++) {
xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
for (k=0; k<N; k++)
zd_dev[k] += c[i] * xd_dev[k];
}
}
}
free(xd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
/* Element-wise copy on the device: z[i] = x[i].
   Device data pointers are passed via is_device_ptr since they already
   reference device memory. */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
/* Element-wise sum on the device: z[i] = x[i] + y[i]. */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]+yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
/* Element-wise difference on the device: z[i] = x[i] - y[i]. */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = xd_dev[i]-yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
/* Element-wise negation on the device: z[i] = -x[i]. */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *zd_dev;
int dev;
xd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = -xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
/* Scaled element-wise sum on the device: z[i] = c * (x[i] + y[i]). */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*(xd_dev[i]+yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
/* Scaled element-wise difference on the device: z[i] = c * (x[i] - y[i]). */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = c*(xd_dev[i]-yd_dev[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
/* Linear operation on the device: z[i] = a*x[i] + y[i]. */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])+yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
/* Linear operation on the device: z[i] = a*x[i] - y[i]. */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev, *zd_dev;
int dev;
xd_dev = yd_dev = zd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
zd_dev = NV_DATA_DEV_OMPDEV(z);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
zd_dev[i] = (a*xd_dev[i])-yd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
/* AXPY update on the device: y[i] += a*x[i].
   Special-cases a == 1 and a == -1 to avoid the multiply. */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
sunindextype i, N;
realtype *xd_dev, *yd_dev;
int dev;
xd_dev = yd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
yd_dev = NV_DATA_DEV_OMPDEV(y);
/* get default device identifier */
dev = omp_get_default_device();
/* y <- y + x */
if (a == ONE) {
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
yd_dev[i] += xd_dev[i];
return;
}
/* y <- y - x */
if (a == -ONE) {
#pragma omp target map(to:N) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
yd_dev[i] -= xd_dev[i];
return;
}
/* general case: y <- y + a*x */
#pragma omp target map(to:N,a) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
yd_dev[i] += a*xd_dev[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
/* In-place scaling on the device: x[i] *= a. */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
sunindextype i, N;
realtype *xd_dev;
int dev;
xd_dev = NULL;
N = NV_LENGTH_OMPDEV(x);
xd_dev = NV_DATA_DEV_OMPDEV(x);
/* get default device identifier */
dev = omp_get_default_device();
#pragma omp target map(to:N,a) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
for (i = 0; i < N; i++)
xd_dev[i] *= a;
return;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/* For each vector i in the arrays: Z[i] = X[i] + Y[i] (element-wise, on the
   device).  Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = xd_dev[j] + yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector i in the arrays: Z[i] = X[i] - Y[i] (element-wise, on the
   device).  Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = xd_dev[j] - yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector i in the arrays: Z[i] = c * (X[i] + Y[i]) (element-wise,
   on the device).  Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector i in the arrays: Z[i] = c * (X[i] - Y[i]) (element-wise,
   on the device).  Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector i in the arrays: Z[i] = a*X[i] + Y[i] (element-wise,
   on the device).  Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector i in the arrays: Z[i] = a*X[i] - Y[i] (element-wise,
   on the device).  Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype* zd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
realtype** zd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
for (i=0; i<nvec; i++)
zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
free(zd_dev_ptrs);
return(0);
}
/* For each vector i in the arrays: Y[i] += a*X[i] (element-wise, on the
   device).  Special-cases a == 1 and a == -1 to avoid the multiply.
   Assumes nvec >= 1 (callers validate).  Always returns 0. */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
int i, dev;
sunindextype j, N;
realtype* xd_dev=NULL;
realtype* yd_dev=NULL;
realtype** xd_dev_ptrs=NULL;
realtype** yd_dev_ptrs=NULL;
N = NV_LENGTH_OMPDEV(X[0]);
/* get default device identifier */
dev = omp_get_default_device();
/* Allocate and store dev pointers to copy to device */
xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
for (i=0; i<nvec; i++)
xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
for (i=0; i<nvec; i++)
yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
/* Y[i] <- Y[i] + X[i] */
if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += xd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/* Y[i] <- Y[i] - X[i] */
if (a == -ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] -= xd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/* general case: Y[i] <- Y[i] + a*X[i] */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
{
for (i=0; i<nvec; i++) {
xd_dev = xd_dev_ptrs[i];
yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
for (j=0; j<N; j++)
yd_dev[j] += a * xd_dev[j];
}
}
free(xd_dev_ptrs);
free(yd_dev_ptrs);
return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
/* Enable (tf nonzero) or disable (tf zero) all fused and vector-array
   operations on vector v at once.  Returns 0 on success, -1 if v or its
   ops table is NULL. */
int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* fused vector operations */
v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMPDEV     : NULL;
v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMPDEV      : NULL;

/* vector array operations */
v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMPDEV         : NULL;
v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMPDEV             : NULL;
v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMPDEV             : NULL;
v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMPDEV          : NULL;
v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV      : NULL;
v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV     : NULL;
v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the fused linear-combination operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the fused scale-add-multi operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the fused multi-dot-product operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array linear-sum operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array scale operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array constant-fill operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array WRMS-norm operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array masked WRMS-norm operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array scale-add-multi operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV : NULL;

return(0);
}
/* Enable or disable the vector-array linear-combination operation on v.
   Returns 0 on success, -1 if v or its ops table is NULL. */
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
/* require a valid vector with an ops table */
if (v == NULL || v->ops == NULL) return(-1);

/* attach or detach the operation */
v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;

return(0);
}
|
GB_select_phase1.c | //------------------------------------------------------------------------------
// GB_select_count: count entries in eacn vector for C=select(A,thunk)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#if defined ( GB_ENTRY_SELECTOR )
#define GB_CTYPE int64_t
#include "GB_reduce_each_vector.c"
#else
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
//--------------------------------------------------------------------------
// tril, triu, diag, offdiag, resize: binary search in each vector
//--------------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(guided)
for (k = 0 ; k < anvec ; k++)
{
//----------------------------------------------------------------------
// get A(:,k)
//----------------------------------------------------------------------
int64_t pA_start = Ap [k] ;
int64_t pA_end = Ap [k+1] ;
int64_t p = pA_start ;
int64_t cjnz = 0 ;
int64_t ajnz = pA_end - pA_start ;
bool found = false ;
if (ajnz > 0)
{
//------------------------------------------------------------------
// search for the entry A(i,k)
//------------------------------------------------------------------
int64_t ifirst = Ai [pA_start] ;
int64_t ilast = Ai [pA_end-1] ;
#if defined ( GB_RESIZE_SELECTOR )
int64_t i = ithunk ;
#else
int64_t j = (Ah == NULL) ? k : Ah [k] ;
int64_t i = j-ithunk ;
#endif
if (i < ifirst)
{
// all entries in A(:,k) come after i
;
}
else if (i > ilast)
{
// all entries in A(:,k) come before i
p = pA_end ;
}
else if (ajnz == avlen)
{
// A(:,k) is dense
found = true ;
p += i ;
ASSERT (Ai [p] == i) ;
}
else
{
// binary search for A (i,k)
int64_t pright = pA_end - 1 ;
GB_SPLIT_BINARY_SEARCH (i, Ai, p, pright, found) ;
}
#if defined ( GB_TRIL_SELECTOR )
// keep p to pA_end-1
cjnz = pA_end - p ;
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// if found, keep pA_start to p
// else keep pA_start to p-1
if (found)
{
p++ ;
// now in both cases, keep pA_start to p-1
}
// keep pA_start to p-1
cjnz = p - pA_start ;
#elif defined ( GB_DIAG_SELECTOR )
// if found, keep p
// else keep nothing
cjnz = found ;
if (!found) p = -1 ;
// if (cjnz >= 0) keep p, else keep nothing
#elif defined ( GB_OFFDIAG_SELECTOR )
// if found, keep pA_start to p-1 and p+1 to pA_end-1
// else keep pA_start to pA_end
cjnz = ajnz - found ;
if (!found)
{
p = pA_end ;
// now just keep pA_start to p-1; p+1 to pA_end is
// now empty
}
// in both cases, keep pA_start to p-1 and
// p+1 to pA_end-1. If the entry is not found, then
// p == pA_end, and all entries are kept.
#endif
}
//----------------------------------------------------------------------
// log the result for the kth vector
//----------------------------------------------------------------------
Zp [k] = p ;
Cp [k] = cjnz ;
}
//--------------------------------------------------------------------------
// compute Wfirst and Wlast for each task
//--------------------------------------------------------------------------
// Wfirst [0..ntasks-1] and Wlast [0..ntasks-1] are required for
// constructing C_start_slice [0..ntasks-1] in GB_selector.
int64_t *GB_RESTRICT Wfirst = (int64_t *) Wfirst_space ;
int64_t *GB_RESTRICT Wlast = (int64_t *) Wlast_space ;
for (int tid = 0 ; tid < ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
if (kfirst <= klast)
{
int64_t pA_start = pstart_slice [tid] ;
int64_t pA_end = GB_IMIN (Ap [kfirst+1], pstart_slice [tid+1]) ;
if (pA_start < pA_end)
{
#if defined ( GB_TRIL_SELECTOR )
// keep Zp [kfirst] to pA_end-1
int64_t p = GB_IMAX (Zp [kfirst], pA_start) ;
Wfirst [tid] = GB_IMAX (0, pA_end - p) ;
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// keep pA_start to Zp [kfirst]-1
int64_t p = GB_IMIN (Zp [kfirst], pA_end) ;
Wfirst [tid] = GB_IMAX (0, p - pA_start) ;
#elif defined ( GB_DIAG_SELECTOR )
// task that owns the diagonal entry does this work
int64_t p = Zp [kfirst] ;
Wfirst [tid] = (pA_start <= p && p < pA_end) ? 1 : 0 ;
#elif defined ( GB_OFFDIAG_SELECTOR )
// keep pA_start to Zp [kfirst]-1
int64_t p = GB_IMIN (Zp [kfirst], pA_end) ;
Wfirst [tid] = GB_IMAX (0, p - pA_start) ;
// keep Zp [kfirst]+1 to pA_end-1
p = GB_IMAX (Zp [kfirst]+1, pA_start) ;
Wfirst [tid] += GB_IMAX (0, pA_end - p) ;
#endif
}
}
if (kfirst < klast)
{
int64_t pA_start = Ap [klast] ;
int64_t pA_end = pstart_slice [tid+1] ;
if (pA_start < pA_end)
{
#if defined ( GB_TRIL_SELECTOR )
// keep Zp [klast] to pA_end-1
int64_t p = GB_IMAX (Zp [klast], pA_start) ;
Wlast [tid] = GB_IMAX (0, pA_end - p) ;
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// keep pA_start to Zp [klast]-1
int64_t p = GB_IMIN (Zp [klast], pA_end) ;
Wlast [tid] = GB_IMAX (0, p - pA_start) ;
#elif defined ( GB_DIAG_SELECTOR )
// task that owns the diagonal entry does this work
int64_t p = Zp [klast] ;
Wlast [tid] = (pA_start <= p && p < pA_end) ? 1 : 0 ;
#elif defined ( GB_OFFDIAG_SELECTOR )
// keep pA_start to Zp [klast]-1
int64_t p = GB_IMIN (Zp [klast], pA_end) ;
Wlast [tid] = GB_IMAX (0, p - pA_start) ;
// keep Zp [klast]+1 to pA_end-1
p = GB_IMAX (Zp [klast]+1, pA_start) ;
Wlast [tid] += GB_IMAX (0, pA_end - p) ;
#endif
}
}
}
#endif
|
SimulatorBase.h | /*
Menge Crowd Simulation Framework
Copyright and trademark 2012-17 University of North Carolina at Chapel Hill
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
or
LICENSE.txt in the root of the Menge repository.
Any questions or comments should be sent to the authors menge@cs.unc.edu
<http://gamma.cs.unc.edu/Menge/>
*/
#ifndef __SIMULATOR_BASE_H__
#define __SIMULATOR_BASE_H__
/*!
@file SimulatorBase.h
@brief Contains the SimulatorBase class - the common, generic simulator to work with different
types of agents. It is templated on the Agent type.
*/
#include "MengeCore/Agents/AgentInitializer.h"
#include "MengeCore/Agents/SimulatorInterface.h"
#include "MengeCore/Agents/SpatialQueries/SpatialQuery.h"
#include "MengeCore/Runtime/Utils.h"
#include "MengeCore/mengeCommon.h"
#include <vector>
#if HAVE_OPENMP || _OPENMP
#include <omp.h>
#endif
namespace Menge {
namespace Agents {
/*!
@brief Defines the basic simulator. It is responsible for tracking agents and obstacles as
well as initializing such from files.
*/
template <class Agent>
class SimulatorBase : public SimulatorInterface {
public:
/*!
@brief Constructs a simulator instance.
*/
SimulatorBase();
/*!
@brief Destroys a simulator instance.
*/
~SimulatorBase();
/*!
@brief Lets the simulator perform a simulation step and updates the two-dimensional _p and
two-dimensional velocity of each agent.
*/
void doStep();
/*!
@brief Initialize spatial query structure.
*/
virtual bool initSpatialQuery();
/*!
@brief After all agents and all obstacles have been added to the scene does the work to finish
preparing the simulation to be run.
This work is performed when the simulator is done being initialized. If a particular new
pedestrian simulator requires particular finalization work, this function should be sub-classed
and the parent class's version of the function should be explicitly called before any additional
work is performed.
*/
virtual void finalize();
/*!
@brief Accessor for agents.
@param agentNo The number of the agent who is to be retrieved. This is *not* the
same as the agent identifier. It is merely the local index of the agent
in the simulator's local store.
@returns A pointer to the agent.
*/
virtual BaseAgent* getAgent(size_t agentNo) { return &_agents[agentNo]; }
/*!
@brief Const accessor for agents.
@param agentNo The number of the agent who is to be retrieved. This is *not* the same
as the agent identifier. It is merely the local index of the agent in the
simulator's local store.
@returns A pointer to the agent.
*/
virtual const BaseAgent* getAgent(size_t agentNo) const { return &_agents[agentNo]; }
/*!
@brief Add an agent with specified position to the simulator whose properties are defined by
the given agent initializer.
It uses the agent initializer to define the values of the remaining agent parameters.
@param pos The 2d vector representing the agent's position.
@param agentInit The AgentInitializer necessary to parse AgentSet properties.
@returns A pointer to the agent (if initialization was successful) or NULL if failed.
*/
virtual BaseAgent* addAgent(const Vector2& pos, AgentInitializer* agentInit);
/*!
@brief Returns the count of agents in the simulation.
@returns The count of agents in the simulation.
*/
virtual size_t getNumAgents() const { return _agents.size(); }
/*!
@brief Reports if there are non-common Experiment parameters that this simulator requires in
the XML file.
@returns By default, the simulator base ONLY uses common parameters. Always returns false.
*/
virtual bool hasExpTarget() { return false; }
/*!
@brief Reports if the given Experiment attribute tag name belongs to this simulator.
@param tagName The name of the candidate experiment XML tag.
@returns By default, the simulator base ONLY uses common parameters. Always returns false.
*/
virtual bool isExpTarget(const std::string& tagName) { return false; }
/*!
@brief Given an Experiment parameter name and value, sets the appropriate simulator
parameter.
// TODO: Define the conditions of success/failure.
@param paramName A string containing the parameter name for the experiment.
@param value A string containing the value for the parameter.
@returns True if the parameter was successfully set, false otherwise.
*/
// NOTE(review): dynamic exception specifications (throw(...)) were deprecated in
// C++11 and removed in C++17 — this signature will not compile under -std=c++17.
virtual bool setExpParam(const std::string& paramName,
const std::string& value) throw(XMLParamException);
protected:
/*!
@brief Computes the neighbors for the given agent.
@param agent The agent whose neighbors are to be computed.
*/
void computeNeighbors(Agent* agent);
/*!
@brief The collection of agents in the simulation
*/
std::vector<Agent> _agents;
};
////////////////////////////////////////////////////////////////
// Implementation of SimulatorBase
////////////////////////////////////////////////////////////////
template <class Agent>
SimulatorBase<Agent>::SimulatorBase() : SimulatorInterface(), _agents() {}
////////////////////////////////////////////////////////////////
template <class Agent>
SimulatorBase<Agent>::~SimulatorBase() {
_agents.clear();
}
////////////////////////////////////////////////////////////////
template <class Agent>
void SimulatorBase<Agent>::doStep() {
assert(_spatialQuery != 0x0 && "Can't run without a spatial query instance defined");
// Rebuild/refresh the spatial index before issuing neighbor queries this step.
_spatialQuery->updateAgents();
int AGT_COUNT = static_cast<int>(_agents.size());
// Phase 1: every agent computes its new velocity from the *current* state of
// all other agents. Splitting velocity computation from position update keeps
// the result independent of agent iteration order.
#pragma omp parallel for
for (int i = 0; i < AGT_COUNT; ++i) {
computeNeighbors(&(_agents[i]));
// Externally-driven agents keep whatever velocity their controller set.
if(!_agents[i]._external)
_agents[i].computeNewVelocity();
}
// Phase 2: apply the new velocities to advance positions by one TIME_STEP.
#pragma omp parallel for
for (int i = 0; i < AGT_COUNT; ++i) {
if(!_agents[i]._external)
_agents[i].update(TIME_STEP);
}
_globalTime += TIME_STEP;
}
////////////////////////////////////////////////////////////////
template <class Agent>
bool SimulatorBase<Agent>::initSpatialQuery() {
assert(_spatialQuery != 0x0 && "Can't run without a spatial query instance defined");
// Hand the spatial query a base-class view of every agent, then let it
// preprocess the obstacle set.
const size_t agentCount = _agents.size();
std::vector<BaseAgent*> basePtrs(agentCount);
size_t idx = 0;
while (idx < agentCount) {
basePtrs[idx] = &_agents[idx];
++idx;
}
_spatialQuery->setAgents(basePtrs);
_spatialQuery->processObstacles();
return true;
}
////////////////////////////////////////////////////////////////
template <class Agent>
void SimulatorBase<Agent>::finalize() {
// Let the base class finish first, then give each agent a chance to set up
// its internal state.
SimulatorInterface::finalize();
const size_t agentCount = _agents.size();
for (size_t a = 0; a < agentCount; ++a) {
_agents[a].initialize();
}
}
////////////////////////////////////////////////////////////////
template <class Agent>
BaseAgent* SimulatorBase<Agent>::addAgent(const Vector2& pos, AgentInitializer* agentInit) {
// Build the agent by value first; it is only committed to _agents if the
// initializer succeeds.
Agent agent;
agent._pos = pos;
// The id is the index the agent will occupy once pushed.
agent._id = _agents.size();
if (!agentInit->setProperties(&agent)) {
logger << Logger::ERR_MSG << "Error initializing agent " << agent._id << "\n";
return 0x0;
}
_agents.push_back(agent);
// NOTE(review): the returned pointer is into _agents' storage and is
// invalidated by any later push_back that reallocates — callers must not
// cache it across additional addAgent calls.
return &_agents[_agents.size() - 1];
}
////////////////////////////////////////////////////////////////
// Sets a simulator-level experiment parameter. Only "time_step" is understood
// here; any other name returns false so callers can try other handlers.
// Throws XMLParamException when the value cannot be parsed as a float.
template <class Agent>
bool SimulatorBase<Agent>::setExpParam(const std::string& paramName,
const std::string& value) throw(XMLParamException) {
if (paramName == "time_step") {
try {
LOGICAL_TIME_STEP = toFloat(value);
// NOTE(review): UtilException is caught by value (slicing risk) and unnamed;
// consider `catch (const UtilException&)` if this file is modernized.
} catch (UtilException) {
throw XMLParamException(
std::string("Common parameters \"time_step\" value couldn't be converted "
"to a float. Found the value: ") +
value);
}
} else {
return false;
}
return true;
}
////////////////////////////////////////////////////////////////
template <class Agent>
void SimulatorBase<Agent>::computeNeighbors(Agent* agent) {
// Reset the agent's per-step query state, then collect nearby obstacles.
agent->startQuery();
_spatialQuery->obstacleQuery(agent);
// Agent neighbors are only gathered when the agent is configured to use any.
if (agent->_maxNeighbors > 0) {
_spatialQuery->agentQuery(agent);
}
}
} // namespace Agents
} // namespace Menge
#endif // __SIMULATOR_BASE_H__
|
pr43893.c | /* PR c/43893 */
/* { dg-do run } */
extern void abort (void);
/* Regression test for PR c/43893: each parallel loop below must execute
exactly one iteration even though its bounds sit at the extreme edge of the
loop variable's type, where a naive iteration-count computation in the OMP
lowering would overflow. The reduction counts iterations; c must be 1. */
int
main ()
{
int c;
unsigned int i;
int j;
/* Unsigned counter, [0,1) — one iteration. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 0; i < 1; i++)
c++;
if (c != 1)
abort ();
/* Unsigned counter, inclusive bound [0,0]. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 0; i <= 0; i++)
c++;
if (c != 1)
abort ();
/* Signed counter starting at INT_MIN, exclusive bound. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = - __INT_MAX__ - 1; j < - __INT_MAX__; j++)
c++;
if (c != 1)
abort ();
/* Signed counter starting at INT_MIN, inclusive bound. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = - __INT_MAX__ - 1; j <= - __INT_MAX__ - 1; j++)
c++;
if (c != 1)
abort ();
/* Descending unsigned counter starting at UINT_MAX, exclusive bound. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 2U * __INT_MAX__ + 1; i > 2U * __INT_MAX__; i--)
c++;
if (c != 1)
abort ();
/* Descending unsigned counter starting at UINT_MAX, inclusive bound. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (i = 2U * __INT_MAX__ + 1; i >= 2U * __INT_MAX__ + 1; i--)
c++;
if (c != 1)
abort ();
/* Descending signed counter starting at INT_MAX, exclusive bound. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = __INT_MAX__; j > __INT_MAX__ - 1; j--)
c++;
if (c != 1)
abort ();
/* Descending signed counter starting at INT_MAX, inclusive bound. */
c = 0;
#pragma omp parallel for reduction(+:c)
for (j = __INT_MAX__; j >= __INT_MAX__; j--)
c++;
if (c != 1)
abort ();
return 0;
}
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include "gather_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Per-node private state for the reference gather implementation. */
typedef struct
{
int* in_shape; // the dim of the input; heap buffer owned by this struct (allocated in prerun)
int axis; // axis along which to gather
int indices_num; // number of indices to gather
int dim_size; // number of valid entries in in_shape
int is_onnx; // nonzero selects the ONNX-style path in ref_gather_fp32
} gather_param_t;
/* Reference float32 gather: for every outer block, copy the inner_size-element
slice(s) selected along `axis` into the packed output. Returns 0.
num_thread is currently unused (the omp pragma below is commented out). */
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
float* out_ptr = output;
float* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
// outer_size = product of dims before axis; inner_size = product after.
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
// TLOG_ERR("inner_size size: %d %d \n", inner_size, param->in_shape[i]);
}
// #pragma omp parallel for num_threads(num_thread)
if(param->is_onnx){
// NOTE(review): this branch uses param->indices_num itself as the gather
// index instead of reading input_indices[], and copies only one slice per
// outer block. That looks like it only handles the single-scalar-index
// ONNX case with the index smuggled through indices_num — confirm against
// the ONNX serializer before relying on it for indices_num > 1.
for (int outer = 0; outer < outer_size; ++outer)
{
memcpy(out_ptr + (outer * param->indices_num ) * inner_size,
in_ptr + (outer* axis_size + param->indices_num) * inner_size, inner_size* sizeof(float));
}
} else {
// General path: one memcpy per (outer block, index) pair.
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size * sizeof(float));
}
}
}
return 0;
}
/* Reference uint8 gather: copy the inner_size-byte slice selected by each
index along `axis` into the packed output. Returns 0. num_thread is
currently unused. */
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
uint8_t* out_ptr = output;
uint8_t* in_ptr = input;
const int axis = param->axis;
const int axis_size = param->in_shape[axis];
/* outer_size = product of dims before axis; inner_size = product after. */
int outer_size = 1;
for (int d = 0; d < axis; d++)
{
outer_size *= param->in_shape[d];
}
int inner_size = 1;
for (int d = axis + 1; d < param->dim_size; d++)
{
inner_size *= param->in_shape[d];
}
for (int outer = 0; outer < outer_size; ++outer)
{
for (int idx = 0; idx < param->indices_num; idx++)
{
uint8_t* dst = out_ptr + (outer * param->indices_num + idx) * inner_size;
uint8_t* src = in_ptr + (outer * axis_size + input_indices[idx]) * inner_size;
memcpy(dst, src, inner_size);
}
}
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct gather_param* gather_param = ( struct gather_param* )ir_node->op.param_mem;
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
op_priv_info->axis = gather_param->axis;
op_priv_info->indices_num = gather_param->indices_num;
op_priv_info->is_onnx = gather_param->is_onnx;
op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num*sizeof(int));
/* prerun now */
return 0;
}
/* Execute the gather: refresh the cached input shape (dims may change between
runs), then dispatch on the input dtype. Returns the kernel's status, or
-1 for unsupported data types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    gather_param_t* op_priv_info = (gather_param_t*)exec_node->ops_priv;

    void* input = input_tensor->data;
    void* indices_data = indices_tensor->data;

    /* Refresh the cached shape every run so a reshape upstream is honored. */
    op_priv_info->dim_size = input_tensor->dim_num;
    for (int i = 0; i < op_priv_info->dim_size; i++)
    {
        op_priv_info->in_shape[i] = input_tensor->dims[i];
    }

    void* output = output_tensor->data;

    int ret = -1; /* unsupported dtypes fall through and report failure */
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_gather_fp32((float*)input, (int*)indices_data, (float*)output, op_priv_info, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_gather_uint8((uint8_t*)input, (int*)indices_data, (uint8_t*)output, op_priv_info, exec_graph->num_thread);
    return ret;
}
/* Allocate and zero the per-node private state. Returns 0 on success,
-1 on allocation failure. Freed in release_node(). */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    /* Fix: dropped the unused ir_node/ir_graph locals the original declared. */
    gather_param_t* op_priv_info = (gather_param_t*)sys_malloc(sizeof(gather_param_t));
    if (op_priv_info == NULL)
    {
        return -1;
    }
    memset(op_priv_info, 0, sizeof(gather_param_t));
    exec_node->ops_priv = op_priv_info;
    return 0;
}
/* Release the in_shape buffer allocated in prerun(). Always returns 0.
NOTE(review): this function is defined but gather_node_ops below registers
.postrun = NULL, so it is never invoked and in_shape leaks — the table
should point at this function. */
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
gather_param_t* op_param = (gather_param_t*)exec_node->ops_priv;
sys_free(op_param->in_shape);
return 0;
}
/* Free the per-node private state allocated in init_node(). Always returns 0. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* priv = (gather_param_t*)exec_node->ops_priv;
    sys_free(priv);
    exec_node->ops_priv = NULL;
    return 0;
}
/* Scheduling priority of this reference kernel: best (no specialized
implementation competes for the gather op on CPU). */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference gather kernel with the CPU device. Returns the
status of register_builtin_node_ops. */
int register_gather_ref_op()
{
return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
/* Remove the reference gather kernel from the CPU device registry. */
int unregister_gather_ref_op()
{
return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
|
omp_for_collapse.c | // RUN: %libomp-compile-and-run
// REQUIRES: !abt
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Utility function to check that i is increasing monotonically
with each call */
/* Verify that successive calls see a monotonically non-decreasing index that
never advances by more than one per call. Passing i == 1 resets the
tracker so a fresh sequence can be checked. Returns 1 when the step from
the previous call is 0 or 1, otherwise 0. */
static int check_i_islarger (int i)
{
static int last_i;
if (i == 1)
{
last_i = 0;
}
int step = i - last_i;
int ok = (step >= 0) && (step <= 1);
last_i = i;
return ok;
}
/* Checks that a collapse(2) worksharing loop with the ordered clause executes
its ordered regions in sequential iteration order of the collapsed 99x99
space, even under schedule(static,1). Returns 1 on success. */
int test_omp_for_collapse()
{
int is_larger = 1;
#pragma omp parallel
{
int i,j;
int my_islarger = 1;
// ordered regions must run in the logical iteration order of the
// collapsed loop nest, so check_i_islarger must observe i advancing
// monotonically in steps of at most one.
#pragma omp for private(i,j) schedule(static,1) collapse(2) ordered
for (i = 1; i < 100; i++) {
for (j =1; j <100; j++) {
#pragma omp ordered
my_islarger = check_i_islarger(i)&&my_islarger;
}
}
// Combine each thread's verdict into the shared result.
#pragma omp critical
is_larger = is_larger && my_islarger;
}
return (is_larger);
}
/* Run the collapse test REPETITIONS times; exit code is the failure count. */
int main()
{
int num_failed = 0;
for (int rep = 0; rep < REPETITIONS; rep++)
{
if (!test_omp_for_collapse())
{
num_failed++;
}
}
return num_failed;
}
|
atomic_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
// Denis Demidov
//
#if !defined(KRATOS_ATOMIC_UTILITIES_H_INCLUDED )
#define KRATOS_ATOMIC_UTILITIES_H_INCLUDED
// System includes
// External includes
#ifdef KRATOS_SMP_OPENMP
#include <omp.h>
#endif
// Project includes
#include "includes/define.h"
#include "containers/array_1d.h"
namespace Kratos
{
///@addtogroup KratosCore
/**
* collection of utilities for atomic updates of simple types. (essentially mimics the omp atomic)
*/
/**
* @param target variable being atomically updated by doing target += value
* @param value value being added
*/
template<class TDataType>
inline void AtomicAdd(TDataType& target, const TDataType& value)
{
// The pragma makes the read-modify-write atomic only when compiled with
// OpenMP enabled; in a serial build it reduces to a plain +=.
#pragma omp atomic
target += value;
}
/**
* @param target variable being atomically updated by doing target += value
* @param value value being added
* Specialization for array_1d
* Note that the update is not really atomic, but rather is done component by component
*/
template <class TDataType, std::size_t ArraySize>
inline void AtomicAdd(array_1d<TDataType,ArraySize>& target, const array_1d<TDataType,ArraySize>& value)
{
    // Each component is updated atomically on its own; the array as a whole
    // is not updated as a single atomic operation.
    std::size_t idx = 0;
    while (idx < ArraySize) {
        AtomicAdd(target[idx], value[idx]);
        ++idx;
    }
}
/**
* @param target vector variable being atomically updated by doing target += value
* @param value vector value being added
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TVectorType1, class TVectorType2>
inline void AtomicAddVector(TVectorType1& target, const TVectorType2& value)
{
    // Sizes must match; each component is added atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicAddVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl;
    for (std::size_t idx = 0; idx < target.size(); ++idx) {
        AtomicAdd(target[idx], value[idx]);
    }
}
/**
 * @param target matrix variable being atomically updated by doing target += value
 * @param value matrix value being added
 * Note that the update is not really atomic, but rather is done component by component
 */
template<class TMatrixType1, class TMatrixType2>
inline void AtomicAddMatrix(TMatrixType1& target, const TMatrixType2& value)
{
KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicAddMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl;
for(std::size_t i=0; i<target.size1(); ++i) {
for(std::size_t j=0; j<target.size2(); ++j) {
AtomicAdd(target(i,j), value(i,j));
}
}
}
/**
 * @param target variable being atomically updated by doing target -= value
 * @param value value being subtracted
 */
template<class TDataType>
inline void AtomicSub(TDataType& target, const TDataType& value)
{
// Atomic only when compiled with OpenMP; otherwise a plain -=.
#pragma omp atomic
target -= value;
}
/**
* @param target variable being atomically updated by doing target -= value
* @param value value being subtracted
* Specialization for array_1d
* Note that the update is not really atomic, but rather is done component by component
*/
template <class TDataType, std::size_t ArraySize>
inline void AtomicSub(array_1d<TDataType,ArraySize>& target, const array_1d<TDataType,ArraySize>& value)
{
    // Component-wise atomic subtraction; the array as a whole is not atomic.
    std::size_t idx = 0;
    while (idx < ArraySize) {
        AtomicSub(target[idx], value[idx]);
        ++idx;
    }
}
/**
* @param target vector variable being atomically updated by doing target -= value
* @param value vector value being subtracted
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TVectorType1, class TVectorType2>
inline void AtomicSubVector(TVectorType1& target, const TVectorType2& value) {
    // Sizes must match; each component is subtracted atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicSubVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl;
    for (std::size_t idx = 0; idx < target.size(); ++idx) {
        AtomicSub(target[idx], value[idx]);
    }
}
/**
* @param target matrix variable being atomically updated by doing target -= value
* @param value matrix value being subtracted
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TMatrixType1, class TMatrixType2>
inline void AtomicSubMatrix(TMatrixType1& target, const TMatrixType2& value)
{
    // Dimensions must match; each entry is subtracted atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicSubMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl;
    for (std::size_t row = 0; row < target.size1(); ++row) {
        for (std::size_t col = 0; col < target.size2(); ++col) {
            AtomicSub(target(row, col), value(row, col));
        }
    }
}
/** @param target variable being atomically updated by doing target *= value
 * @param value value being multiplied
 */
template<class TDataType>
inline void AtomicMult(TDataType& target, const TDataType& value)
{
// Atomic only when compiled with OpenMP; otherwise a plain *=.
#pragma omp atomic
target *= value;
}
/** @param target variable being atomically updated by doing target *= value
* @param value value being multiplied
* Specialization for array_1d
* Note that the update is not really atomic, but rather is done component by component
*/
template <class TDataType, std::size_t ArraySize>
inline void AtomicMult(array_1d<TDataType,ArraySize>& target, const array_1d<TDataType,ArraySize>& value)
{
    // Component-wise atomic multiplication; the array as a whole is not atomic.
    std::size_t idx = 0;
    while (idx < ArraySize) {
        AtomicMult(target[idx], value[idx]);
        ++idx;
    }
}
/** @param target vector variable being atomically updated by doing target *= value
* @param value vector value being multiplied
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TVectorType1, class TVectorType2>
inline void AtomicMultVector(TVectorType1& target, const TVectorType2& value)
{
    // Sizes must match; each component is multiplied atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicMultVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl;
    for (std::size_t idx = 0; idx < target.size(); ++idx) {
        AtomicMult(target[idx], value[idx]);
    }
}
/**
* @param target matrix variable being atomically updated by doing target *= value
* @param value matrix value being multiplied
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TMatrixType1, class TMatrixType2>
inline void AtomicMultMatrix(TMatrixType1& target, const TMatrixType2& value)
{
    // Dimensions must match; each entry is multiplied atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicMultMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl;
    for (std::size_t row = 0; row < target.size1(); ++row) {
        for (std::size_t col = 0; col < target.size2(); ++col) {
            AtomicMult(target(row, col), value(row, col));
        }
    }
}
/** @param target variable being atomically updated by doing target *= 1.0/value
 * @param value value being divided by
 */
template<class TDataType>
inline void AtomicDiv(TDataType& target, const TDataType& value)
{
    // Fix: cast the reciprocal back to TDataType. "1.0/value" has type
    // double, so forwarding it directly to AtomicMult(TDataType&, const
    // TDataType&) makes template argument deduction conflict (TDataType
    // deduced as both e.g. float and double) and fails to compile for any
    // TDataType other than double. Behavior for double is unchanged.
    AtomicMult(target, static_cast<TDataType>(1.0 / value));
}
/** @param target variable being atomically updated by doing target *= 1.0/value
* @param value value being divided
* Specialization for array_1d
* Note that the update is not really atomic, but rather is done component by component
*/
template <class TDataType, std::size_t ArraySize>
inline void AtomicDiv(array_1d<TDataType,ArraySize>& target, const array_1d<TDataType,ArraySize>& value)
{
    // Component-wise atomic division; the array as a whole is not atomic.
    std::size_t idx = 0;
    while (idx < ArraySize) {
        AtomicDiv(target[idx], value[idx]);
        ++idx;
    }
}
/** @param target vector variable being atomically updated by doing target *= 1.0/value
* @param value vector value being divided
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TVectorType1, class TVectorType2>
inline void AtomicDivVector(TVectorType1& target, const TVectorType2& value)
{
    // Sizes must match; each component is divided atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size() != value.size()) << "vector size mismatch in vector AtomicDivVector- Sizes are: " << target.size() << " for target and " << value.size() << " for value " << std::endl;
    for (std::size_t idx = 0; idx < target.size(); ++idx) {
        AtomicDiv(target[idx], value[idx]);
    }
}
/**
* @param target matrix variable being atomically updated by doing target *= 1.0/value
* @param value matrix value being divided
* Note that the update is not really atomic, but rather is done component by component
*/
template<class TMatrixType1, class TMatrixType2>
inline void AtomicDivMatrix(TMatrixType1& target, const TMatrixType2& value)
{
    // Dimensions must match; each entry is divided atomically on its own.
    KRATOS_DEBUG_ERROR_IF(target.size1() != value.size1() || target.size2() != value.size2()) << "matrix size mismatch in matrix AtomicDivMatrix- Sizes are: " << target.size1() << "x" << target.size2() << " for target and " << value.size1() << "x" << value.size2() << " for value " << std::endl;
    for (std::size_t row = 0; row < target.size1(); ++row) {
        for (std::size_t col = 0; col < target.size2(); ++col) {
            AtomicDiv(target(row, col), value(row, col));
        }
    }
}
} // namespace Kratos.
#endif // KRATOS_ATOMIC_UTILITIES_H_INCLUDED defined
|
taskwait-depend.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// taskwait with depend clause was introduced with gcc-9
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// clang does not yet support taskwait with depend clause
// clang-12 introduced parsing, but no codegen
// update expected result when codegen in clang was added
// XFAIL: clang
#include "callback.h"
#include <omp.h>
/* OMPT callback test: creates one deferred task with an out-dependence on x
and then a taskwait with an in-dependence on x. The printf/print_* output
is matched against the CHECK lines below by FileCheck — do not alter any
output or the order of the constructs. */
int main() {
int x = 0;
#pragma omp parallel num_threads(2)
{
#pragma omp master
{
print_ids(0);
printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
&x);
// Explicit task with an out-dependence; triggers task_create +
// dependences callbacks.
#pragma omp task depend(out : x)
{ x++; }
print_fuzzy_address(1);
// Dependent taskwait (OpenMP 5.0): reported as an undeferred,
// mergeable explicit task with an in-dependence.
#pragma omp taskwait depend(in: x)
print_fuzzy_address(2);
}
}
return 0;
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende
// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred|
// CHECK-SAME: ompt_task_mergeable=1207959556, has_dependences=yes
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[SECOND_TASK]]
// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
|
GB_transplant.c | //------------------------------------------------------------------------------
// GB_transplant: replace contents of one matrix with another
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Transplant A into C, and then free A. If any part of A is shallow, or if A
// must be typecasted, a deep copy is made into C. Prior content of C is
// ignored. Then A is freed, except for any shallow components of A which are
// left untouched (after unlinking them from A). The resulting matrix C is not
// shallow. This function is not user-callable. The new type of C (ctype)
// must be compatible with A->type.
// Only GrB_SUCCESS and GrB_OUT_OF_MEMORY are returned by this function.
#include "GB.h"
// Move the contents of *Ahandle (A) into C, freeing A on return (both on
// success and on out-of-memory).  Deep copies are made only where required:
// when a component of A is shallow, or when a typecast from A->type to ctype
// is needed.  The only possible returns are GrB_SUCCESS and GB_OUT_OF_MEMORY.
GrB_Info GB_transplant          // transplant one matrix into another
(
    GrB_Matrix C,               // output matrix to overwrite with A
    const GrB_Type ctype,       // new type of C
    GrB_Matrix *Ahandle,        // input matrix to copy from and free
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (Ahandle != NULL) ;
    GrB_Matrix A = *Ahandle ;
    ASSERT (!GB_aliased (C, A)) ;
    ASSERT (C != NULL) ;
    ASSERT_MATRIX_OK (A, "A before transplant", GB0) ;
    ASSERT_TYPE_OK (ctype, "new type for C", GB0) ;

    // pending tuples may not appear in A
    ASSERT (!GB_PENDING (A)) ;

    // zombies in A can be safely transplanted into C
    ASSERT (GB_ZOMBIES_OK (A)) ;

    // C is about to be cleared, so zombies and pending tuples are OK
    ASSERT (GB_PENDING_OK (C)) ; ASSERT (GB_ZOMBIES_OK (C)) ;

    // the ctype and A->type must be compatible.  C->type is ignored
    ASSERT (GB_Type_compatible (ctype, A->type)) ;

    int64_t avdim = A->vdim ;
    int64_t avlen = A->vlen ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t anz = GB_NNZ (A) ;
    int64_t anvec = A->nvec ;

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // save prior pattern of C, if dense
    //--------------------------------------------------------------------------

    // If both A and C are dense with identical dimensions and format, the
    // existing C->p and C->i already describe the result pattern exactly, so
    // they can be saved and reused instead of being freed and reallocated.

    bool A_is_dense = GB_is_dense (A) ;

    bool keep_Cp_and_Ci =               // keep C->p and C->i if:
    (
        GB_is_dense (C)                 // both A and C are dense
        && A_is_dense
        && !GB_ZOMBIES (C)              // neither have zombies
        && !GB_ZOMBIES (A)
        && !(C->p_shallow)              // Cp and Ci are not shallow
        && !(C->i_shallow)
        && !C->is_hyper                 // both A and C are standard
        && !A->is_hyper
        && C->vdim == avdim             // A and C have the same size
        && C->vlen == avlen
        && C->is_csc == A->is_csc       // A and C have the same format
        && C->p != NULL
        && C->i != NULL                 // Cp and Ci exist
    ) ;

    // Cp_keep/Ci_keep temporarily take ownership of the prior dense pattern
    // of C while GB_PHIX_FREE clears the rest of C below.
    int64_t *GB_RESTRICT Cp_keep = NULL ;
    int64_t *GB_RESTRICT Ci_keep = NULL ;
    int64_t cplen_keep = 0 ;
    int64_t cnvec_keep = 0 ;

    if (keep_Cp_and_Ci)
    {
        // Keep C->p and C->i by removing them from C.  They already contain
        // the right pattern for a dense matrix C.  No need to free it and
        // recreate the same thing.
        GBBURBLE ("(remains dense) ") ;
        Cp_keep = C->p ;
        Ci_keep = C->i ;
        cplen_keep = C->plen ;
        cnvec_keep = C->nvec ;
        C->p = NULL ;
        C->i = NULL ;
    }

    //--------------------------------------------------------------------------
    // clear C and transplant the type, size, and hypersparsity
    //--------------------------------------------------------------------------

    // free all content of C
    GB_PHIX_FREE (C) ;

    ASSERT (!GB_PENDING (C)) ;
    ASSERT (!GB_ZOMBIES (C)) ;
    ASSERT (C->nzmax == 0) ;

    // It is now safe to change the type, dimension, and hypersparsity of C
    C->type = ctype ;
    C->type_size = ctype->size ;
    C->is_csc = A->is_csc ;
    C->is_hyper = A->is_hyper ;
    C->vlen = avlen ;
    C->vdim = avdim ;
    ASSERT (A->nvec_nonempty == -1 ||   // can be postponed
    A->nvec_nonempty == GB_nvec_nonempty (A, Context)) ;
    C->nvec_nonempty = A->nvec_nonempty ;

    // C->hyper_ratio is not modified by the transplant

    // C is not shallow, and has no content
    ASSERT (!C->p_shallow && !C->h_shallow && !C->i_shallow && !C->x_shallow) ;
    ASSERT (C->h == NULL && C->p == NULL && C->i == NULL && C->x == NULL) ;

    //--------------------------------------------------------------------------
    // transplant A->p vector pointers and A->h hyperlist
    //--------------------------------------------------------------------------

    if (keep_Cp_and_Ci)
    {

        //----------------------------------------------------------------------
        // keep existing C->p
        //----------------------------------------------------------------------

        C->p = Cp_keep ;
        Cp_keep = NULL ;
        C->h = NULL ;
        C->plen = cplen_keep ;
        C->nvec = cnvec_keep ;

        // free any non-shallow A->p and A->h content of A
        GB_ph_free (A) ;

    }
    else if (A->p_shallow || A->h_shallow)
    {

        //----------------------------------------------------------------------
        // A->p or A->h are shallow copies another matrix; make a deep copy
        //----------------------------------------------------------------------

        int nth = GB_nthreads (anvec, chunk, nthreads_max) ;

        if (A->is_hyper)
        {
            // A is hypersparse, create new C->p and C->h
            C->plen = anvec ;
            C->nvec = anvec ;
            C->p = GB_MALLOC (C->plen+1, int64_t) ;
            C->h = GB_MALLOC (C->plen  , int64_t) ;
            if (C->p == NULL || C->h == NULL)
            {
                // out of memory: C is cleared and A is freed before returning
                GB_PHIX_FREE (C) ;
                GB_MATRIX_FREE (Ahandle) ;
                return (GB_OUT_OF_MEMORY) ;
            }

            // copy A->p and A->h into the newly created C->p and C->h
            GB_memcpy (C->p, A->p, (anvec+1) * sizeof (int64_t), nth) ;
            GB_memcpy (C->h, A->h,  anvec    * sizeof (int64_t), nth) ;
        }
        else
        {
            // A is non-hypersparse, create new C->p
            C->plen = avdim ;
            C->nvec = avdim ;
            C->p = GB_MALLOC (C->plen+1, int64_t) ;
            if (C->p == NULL)
            {
                // out of memory: C is cleared and A is freed before returning
                GB_PHIX_FREE (C) ;
                GB_MATRIX_FREE (Ahandle) ;
                return (GB_OUT_OF_MEMORY) ;
            }

            if (A_is_dense)
            {
                // create C->p for a dense matrix C; each vector holds avlen
                // entries, so no copy from A->p is needed
                int64_t *GB_RESTRICT Cp = C->p ;
                int64_t k ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (k = 0 ; k <= avdim ; k++)
                {
                    Cp [k] = k * avlen ;
                }
            }
            else
            {
                // copy A->p into the newly created C->p
                GB_memcpy (C->p, A->p, (avdim+1) * sizeof (int64_t), nth) ;
            }
        }

        // free any non-shallow A->p and A->h content of A
        GB_ph_free (A) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // both A->p and A->h are not shallow: quick transplant into C
        //----------------------------------------------------------------------

        // Quick transplant of A->p and A->h into C.  This works for both
        // standard and hypersparse cases.
        ASSERT (C->p == NULL) ;
        ASSERT (C->h == NULL) ;
        C->p = A->p ;
        C->h = A->h ;
        C->plen = A->plen ;
        C->nvec = anvec ;
    }

    // A->p and A->h have been freed or removed from A
    A->p = NULL ;
    A->h = NULL ;
    A->p_shallow = false ;
    A->h_shallow = false ;

    C->p_shallow = false ;
    C->h_shallow = false ;

    C->magic = GB_MAGIC ;          // C is now initialized

    if (anz == 0)
    {
        // quick return if A has no entries
        // Ci_keep is not needed after all, since C is empty
        GB_FREE (Ci_keep) ;
        ASSERT_MATRIX_OK (C, "C empty transplant", GB0) ;
        GB_MATRIX_FREE (Ahandle) ;
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // allocate new space for C->i and C->x if A is shallow
    //--------------------------------------------------------------------------

    // get C->nzmax:  if either C->x or C->i must be allocated, then C->nzmax
    // is set to their minimum size.  Otherwise, if both C->x and C->i can
    // be transplanted from A, then they inherit the nzmax of A.

    // Do not allocate C->i if the pattern of a dense matrix C is being kept.

    ASSERT (C->x == NULL && C->i == NULL) ;
    bool allocate_Ci = (A->i_shallow) && !keep_Cp_and_Ci ;
    bool allocate_Cx = (A->x_shallow || C->type != A->type) ;
    C->nzmax = (allocate_Cx || allocate_Ci) ? anz : A->nzmax ;
    C->nzmax = GB_IMAX (C->nzmax, 1) ;

    // allocate new components if needed
    bool ok = true ;

    if (allocate_Cx)
    {
        // allocate new C->x component
        C->x = GB_MALLOC (C->nzmax * C->type->size, GB_void) ;
        ok = ok && (C->x != NULL) ;
    }

    if (allocate_Ci)
    {
        // allocate new C->i component
        C->i = GB_MALLOC (C->nzmax, int64_t) ;
        ok = ok && (C->i != NULL) ;
    }

    if (!ok)
    {
        // out of memory: free C, A, and the saved dense pattern Ci_keep
        GB_PHIX_FREE (C) ;
        GB_MATRIX_FREE (Ahandle) ;
        GB_FREE (Ci_keep) ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // transplant or copy A->x numerical values
    //--------------------------------------------------------------------------

    // Note that A may contain zombies, and the values of these zombies may be
    // uninitialized values in A->x.  All entries are typecasted or memcpy'ed
    // from A->x to C->x, both zombies and live entries alike.  valgrind may
    // complain about typecasting these uninitialized values, but these
    // warnings are false positives.  The output of the typecasting is itself a
    // zombie, and the values of all zombies are ignored.

    ASSERT_TYPE_OK (C->type, "target C->type for values", GB0) ;
    ASSERT_TYPE_OK (A->type, "source A->type for values", GB0) ;

    if (C->type == A->type)
    {
        // types match
        if (A->x_shallow)
        {
            // A is shallow so make a deep copy; no typecast needed
            GB_memcpy (C->x, A->x, anz * C->type->size, nthreads) ;
            A->x = NULL ;
        }
        else
        {
            // OK to move pointers instead
            C->x = A->x ;
            A->x = NULL ;
        }
    }
    else
    {
        // types differ, must typecast from A to C.
        GB_void *GB_RESTRICT Cx = (GB_void *) C->x ;
        GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
        GB_cast_array (Cx, C->type->code,
        Ax, A->type->code, A->type->size, anz, nthreads) ;
        if (!A->x_shallow)
        {
            GB_FREE (A->x) ;
        }
        A->x = NULL ;
    }

    ASSERT (A->x == NULL) ;     // has been freed or removed
    A->x_shallow = false ;

    ASSERT (C->x != NULL) ;
    C->x_shallow = false ;

    //--------------------------------------------------------------------------
    // transplant or copy A->i row indices
    //--------------------------------------------------------------------------

    if (keep_Cp_and_Ci)
    {

        //----------------------------------------------------------------------
        // keep existing C->i
        //----------------------------------------------------------------------

        // C is dense; restore the prior C->i.  A->i will be freed
        C->i = Ci_keep ;
        Ci_keep = NULL ;

    }
    else if (A->i_shallow)
    {

        //----------------------------------------------------------------------
        // A->i is a shallow copy of another matrix, so we need a deep copy
        //----------------------------------------------------------------------

        if (A_is_dense && !GB_ZOMBIES (A))
        {
            // create C->i for a dense matrix C; indices cycle 0..avlen-1 in
            // each vector, so no copy from A->i is needed
            int64_t *GB_RESTRICT Ci = C->i ;
            int64_t pC ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (pC = 0 ; pC < anz ; pC++)
            {
                Ci [pC] = pC % avlen ;
            }
        }
        else
        {
            // copy A->i into C->i
            GB_memcpy (C->i, A->i, anz * sizeof (int64_t), nthreads) ;
        }

        A->i = NULL ;
        A->i_shallow = false ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A->i is not shallow, so just transplant the pointer from A to C
        //----------------------------------------------------------------------

        C->i = A->i ;
        A->i = NULL ;
        A->i_shallow = false ;
    }

    ASSERT (C->i != NULL) ;
    C->i_shallow = false ;

    C->nzombies = A->nzombies ;     // zombies may have been transplanted into C
    if (!GB_queue_insert (C)) GB_PANIC ;    // TODO in 4.0: delete

    //--------------------------------------------------------------------------
    // free A and return result
    //--------------------------------------------------------------------------

    GB_MATRIX_FREE (Ahandle) ;
    ASSERT_MATRIX_OK (C, "C after transplant", GB0) ;
    return (GrB_SUCCESS) ;
}
|
symm_x_dia_n_hi_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// Symmetric (diagonals with d >= 0 stored) DIA-matrix times dense
// multi-vector: y[:, c] = alpha * A * x[:, c] + beta * y[:, c] for each of
// the `columns` right-hand sides.  Columns are independent, so the outer
// loop is parallelized across threads.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT thread_count = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT col = 0; col < columns; ++col)
    {
        ALPHA_Number *dst = &y[index2(col, 0, ldy)];
        const ALPHA_Number *src = &x[index2(col, 0, ldx)];

        // Scale the output column by beta first.
        for (ALPHA_INT r = 0; r < mat->rows; ++r)
            alpha_mul(dst[r], dst[r], beta);

        // Walk every stored diagonal; only d >= 0 contributes (sub-diagonals
        // with d < 0 are skipped, matching the upper-triangle storage).
        for (ALPHA_INT di = 0; di < mat->ndiag; ++di)
        {
            const ALPHA_INT d = mat->distance[di];

            if (d == 0)
            {
                // Main diagonal: one scaled product per row.
                for (ALPHA_INT r = 0; r < mat->rows; ++r)
                {
                    ALPHA_Number scaled;
                    alpha_mul(scaled, mat->values[index2(di, r, mat->lval)], alpha);
                    alpha_madde(dst[r], scaled, src[r]);
                }
            }
            else if (d > 0)
            {
                // Super-diagonal: apply each entry at (r, c) and, by
                // symmetry, its mirrored copy at (c, r).
                const ALPHA_INT row_start = alpha_max(0, -d);
                const ALPHA_INT col_start = alpha_max(0, d);
                const ALPHA_INT diag_len = alpha_min(mat->rows - row_start, mat->cols - col_start);
                for (ALPHA_INT i = 0; i < diag_len; ++i)
                {
                    const ALPHA_INT r = row_start + i;
                    const ALPHA_INT c = col_start + i;
                    ALPHA_Number scaled;
                    alpha_mul(scaled, mat->values[index2(di, r, mat->lval)], alpha);
                    alpha_madde(dst[r], scaled, src[c]);
                    alpha_madde(dst[c], scaled, src[r]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
convolution_3x3_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Precompute the Winograd F(6x6, 3x3) weight transform for the pack4 NEON
// path: each 3x3 kernel is expanded to an 8x8 tile via the 8x3 matrix ktm
// (computing ktm * k * ktm^T), then the tiles are interleaved so the
// convolution inner loop can load contiguous groups of output channels.
static void conv3x3s1_winograd64_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = ktm * k, an 8x3 intermediate
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * ktm^T, the final 8x8 tile
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 4b-4a-inch/4a-64-outch/4b;
    // On aarch64, output channels are packed 8 at a time where possible
    // (then 4 at a time for the remainder); 32-bit ARM packs 4 at a time only.
#if __aarch64__
    kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 16, 16);
#endif

    int q = 0;
#if __aarch64__
    // Pack 8 output channels x 4 input channels per group: for each of the
    // 64 tile positions k, gather element k from all 32 kernels.
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack4.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                const float* k40 = k4.row(p);
                const float* k41 = k4.row(p + 1);
                const float* k42 = k4.row(p + 2);
                const float* k43 = k4.row(p + 3);

                const float* k50 = k5.row(p);
                const float* k51 = k5.row(p + 1);
                const float* k52 = k5.row(p + 2);
                const float* k53 = k5.row(p + 3);

                const float* k60 = k6.row(p);
                const float* k61 = k6.row(p + 1);
                const float* k62 = k6.row(p + 2);
                const float* k63 = k6.row(p + 3);

                const float* k70 = k7.row(p);
                const float* k71 = k7.row(p + 1);
                const float* k72 = k7.row(p + 2);
                const float* k73 = k7.row(p + 3);

                // 8 output channels (fastest), then 4 input channels
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];

                g00[8] = k01[k];
                g00[9] = k11[k];
                g00[10] = k21[k];
                g00[11] = k31[k];
                g00[12] = k41[k];
                g00[13] = k51[k];
                g00[14] = k61[k];
                g00[15] = k71[k];

                g00[16] = k02[k];
                g00[17] = k12[k];
                g00[18] = k22[k];
                g00[19] = k32[k];
                g00[20] = k42[k];
                g00[21] = k52[k];
                g00[22] = k62[k];
                g00[23] = k72[k];

                g00[24] = k03[k];
                g00[25] = k13[k];
                g00[26] = k23[k];
                g00[27] = k33[k];
                g00[28] = k43[k];
                g00[29] = k53[k];
                g00[30] = k63[k];
                g00[31] = k73[k];

                g00 += 32;
            }
        }
    }
#endif // __aarch64__
    // Pack 4 output channels x 4 input channels per group.
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);

#if __aarch64__
        Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        Mat g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);

                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);

                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);

                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                // 4 output channels (fastest), then 4 input channels
                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];

                g00[4] = k01[k];
                g00[5] = k11[k];
                g00[6] = k21[k];
                g00[7] = k31[k];

                g00[8] = k02[k];
                g00[9] = k12[k];
                g00[10] = k22[k];
                g00[11] = k32[k];

                g00[12] = k03[k];
                g00[13] = k13[k];
                g00[14] = k23[k];
                g00[15] = k33[k];

                g00 += 16;
            }
        }
    }
}
static void conv3x3s1_winograd64_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][4];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _r07 = vld1q_f32(r0 + 28);
float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(tmp[5][m], _tmp5m);
vst1q_f32(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
float* r0_tm_4 = r0_tm_0 + tiles * 16;
float* r0_tm_5 = r0_tm_0 + tiles * 20;
float* r0_tm_6 = r0_tm_0 + tiles * 24;
float* r0_tm_7 = r0_tm_0 + tiles * 28;
for (int m = 0; m < 8; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
vst1q_f32(r0_tm_4, _r0tm4);
vst1q_f32(r0_tm_5, _r0tm5);
vst1q_f32(r0_tm_6, _r0tm6);
vst1q_f32(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 32;
r0_tm_1 += tiles * 32;
r0_tm_2 += tiles * 32;
r0_tm_3 += tiles * 32;
r0_tm_4 += tiles * 32;
r0_tm_5 += tiles * 32;
r0_tm_6 += tiles * 32;
r0_tm_7 += tiles * 32;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
#else
float* tm2p = tm2.row(i / 8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
int nn_outch = 0;
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n" // r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"veor q8, q8 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float tmp[6][8][4];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
const float* output0_tm_6 = output0_tm_0 + tiles * 24;
const float* output0_tm_7 = output0_tm_0 + tiles * 28;
float* output0 = out0.row(i * 6) + (j * 6) * 4;
// TODO neon optimize
for (int m = 0; m < 8; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);
float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
for (int m = 0; m < 6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 8, _out02);
vst1q_f32(output0 + 16, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
vst1q_f32(output0 + 4, _out01);
vst1q_f32(output0 + 12, _out03);
vst1q_f32(output0 + 20, _out05);
// output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 4b-4a-inch/4a-36-outch/4b;
#if __aarch64__
kernel_tm_pack4.create(2 * inch / 4, 36, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 16, 16);
#endif
int q = 0;
#if __aarch64__
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack4.channel(q / 8);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k40[k];
g00[5] = k50[k];
g00[6] = k60[k];
g00[7] = k70[k];
g00[8] = k01[k];
g00[9] = k11[k];
g00[10] = k21[k];
g00[11] = k31[k];
g00[12] = k41[k];
g00[13] = k51[k];
g00[14] = k61[k];
g00[15] = k71[k];
g00[16] = k02[k];
g00[17] = k12[k];
g00[18] = k22[k];
g00[19] = k32[k];
g00[20] = k42[k];
g00[21] = k52[k];
g00[22] = k62[k];
g00[23] = k72[k];
g00[24] = k03[k];
g00[25] = k13[k];
g00[26] = k23[k];
g00[27] = k33[k];
g00[28] = k43[k];
g00[29] = k53[k];
g00[30] = k63[k];
g00[31] = k73[k];
g00 += 32;
}
}
}
#endif // __aarch64__
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
#if __aarch64__
Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
Mat g0 = kernel_tm_pack4.channel(q / 4);
#endif
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k01[k];
g00[5] = k11[k];
g00[6] = k21[k];
g00[7] = k31[k];
g00[8] = k02[k];
g00[9] = k12[k];
g00[10] = k22[k];
g00[11] = k32[k];
g00[12] = k03[k];
g00[13] = k13[k];
g00[14] = k23[k];
g00[15] = k33[k];
g00 += 16;
}
}
}
}
static void conv3x3s1_winograd42_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[6][6][4];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
vst1q_f32(tmp[5][m], _tmp5m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
float* r0_tm_4 = r0_tm_0 + tiles * 16;
float* r0_tm_5 = r0_tm_0 + tiles * 20;
for (int m = 0; m < 6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
vst1q_f32(r0_tm_4, _r0tm4);
vst1q_f32(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 24;
r0_tm_1 += tiles * 24;
r0_tm_2 += tiles * 24;
r0_tm_3 += tiles * 24;
r0_tm_4 += tiles * 24;
r0_tm_5 += tiles * 24;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
#else
float* tm2p = tm2.row(i / 8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, elemsize, elempack, opt.workspace_allocator);
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
int nn_outch = 0;
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
const Mat kernel01_tm = kernel_tm.channel(pp);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k01 = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n" // r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(k01) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(k01)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < tiles; i += 2)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"veor q8, q8 \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(k0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(k0)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
float tmp[4][6][4];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
float* output0 = out0.row(i * 4) + (j * 4) * 4;
// TODO neon optimize
for (int m = 0; m < 6; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 24;
output0_tm_1 += tiles * 24;
output0_tm_2 += tiles * 24;
output0_tm_3 += tiles * 24;
output0_tm_4 += tiles * 24;
output0_tm_5 += tiles * 24;
}
for (int m = 0; m < 4; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);
float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 4, _out01);
vst1q_f32(output0 + 8, _out02);
vst1q_f32(output0 + 12, _out03);
output0 += outw * 4;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v28.4s}, [%1] \n" // r08
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" // r14 r15 r16 r17
"fmla v20.4s, v24.4s, v8.s[0] \n"
"fmla v21.4s, v24.4s, v10.s[0] \n"
"fmla v22.4s, v24.4s, v12.s[0] \n"
"fmla v23.4s, v24.4s, v14.s[0] \n"
"fmla v20.4s, v25.4s, v8.s[1] \n"
"fmla v21.4s, v25.4s, v10.s[1] \n"
"fmla v22.4s, v25.4s, v12.s[1] \n"
"fmla v23.4s, v25.4s, v14.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v26.4s, v8.s[2] \n"
"fmla v21.4s, v26.4s, v10.s[2] \n"
"fmla v22.4s, v26.4s, v12.s[2] \n"
"fmla v23.4s, v26.4s, v14.s[2] \n"
"fmla v20.4s, v27.4s, v8.s[3] \n"
"fmla v21.4s, v27.4s, v10.s[3] \n"
"fmla v22.4s, v27.4s, v12.s[3] \n"
"fmla v23.4s, v27.4s, v14.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v28.4s}, [%2] \n" // r18
"fmla v20.4s, v16.4s, v9.s[0] \n"
"fmla v21.4s, v16.4s, v11.s[0] \n"
"fmla v22.4s, v16.4s, v13.s[0] \n"
"fmla v23.4s, v16.4s, v15.s[0] \n"
"fmla v20.4s, v17.4s, v9.s[1] \n"
"fmla v21.4s, v17.4s, v11.s[1] \n"
"fmla v22.4s, v17.4s, v13.s[1] \n"
"fmla v23.4s, v17.4s, v15.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v9.s[2] \n"
"fmla v21.4s, v18.4s, v11.s[2] \n"
"fmla v22.4s, v18.4s, v13.s[2] \n"
"fmla v23.4s, v18.4s, v15.s[2] \n"
"fmla v20.4s, v19.4s, v9.s[3] \n"
"fmla v21.4s, v19.4s, v11.s[3] \n"
"fmla v22.4s, v19.4s, v13.s[3] \n"
"fmla v23.4s, v19.4s, v15.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v20.4s, v24.4s, v10.s[0] \n"
"fmla v21.4s, v24.4s, v12.s[0] \n"
"fmla v22.4s, v24.4s, v14.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v10.s[1] \n"
"fmla v21.4s, v25.4s, v12.s[1] \n"
"fmla v22.4s, v25.4s, v14.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v20.4s, v26.4s, v10.s[2] \n"
"fmla v21.4s, v26.4s, v12.s[2] \n"
"fmla v22.4s, v26.4s, v14.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v10.s[3] \n"
"fmla v21.4s, v27.4s, v12.s[3] \n"
"fmla v22.4s, v27.4s, v14.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v28.4s}, [%3] \n" // r28
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"sub %4, %4, #512 \n" // kptr -= 8 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28");
#else // __aarch64__
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n" // r00 r01 r02 r03
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n" // r04 r05 r06 r07
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1 :128] \n" // r08
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n" // r10 r11 r12 r13
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n" // r14 r15 r16 r17
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d8-d9}, [%2 :128] \n" // r18
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d12[0] \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d12[1] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d13[0] \n"
"vmla.f32 q13, q10, d1[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d13[1] \n"
"vmla.f32 q13, q11, d1[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n" // r20 r21 r22 r23
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n" // r24 r25 r26 r27
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d0-d1}, [%3 :128] \n" // r28
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
// "pld [%4, #512] \n"
"vldm %4, {d16-d23} \n"
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"sub %4, %4, #512 \n" // kptr -= 8 * 16;
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j + 1 < outw; j += 2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1] \n" // r04
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v4.4s}, [%2] \n" // r14
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3] \n" // r24
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %4, %4, #512 \n" // kptr -= 8 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n" // r00 r01 r02 r03
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128] \n" // r04
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n" // r10 r11 r12 r13
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d8-d9}, [%2 :128] \n" // r14
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n" // r20 r21 r22 r23
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3 :128] \n" // r24
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
// "pld [%4, #512] \n"
"vldm %4, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %4, %4, #512 \n" // kptr -= 8 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; j < outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n" // sum0
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v5.s[3] \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"add %1, %1, #32 \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"add %2, %2, #32 \n"
"fadd v23.4s, v23.4s, v22.4s \n"
"add %3, %3, #32 \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %4, %4, #512 \n" // kptr -= 8 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n" // sum0
"pld [%1, #384] \n"
"vldm %1, {d0-d5} \n" // r00 r01 r02
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmul.f32 q13, q8, d0[0] \n"
"vmul.f32 q14, q9, d0[1] \n"
"vmul.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%2, #384] \n"
"vldm %2, {d0-d5} \n" // r10 r11 r12
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%3, #384] \n"
"vldm %3, {d0-d5} \n" // r20 r21 r22
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d16-d23} \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
// "pld [%4, #512] \n"
"vldm %4, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vadd.f32 q14, q14, q13 \n"
"add %1, %1, #32 \n"
"vadd.f32 q15, q15, q14 \n"
"add %2, %2, #32 \n"
"vadd.f32 q12, q12, q15 \n"
"add %3, %3, #32 \n"
"sub %4, %4, #512 \n" // kptr -= 8 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
/* 3x3 stride-2 convolution via im2col + packed SGEMM, for pack-4 (4-channel
 * interleaved) NEON layout.
 *
 * bottom_blob : input feature map, elempack 4
 * top_blob    : pre-sized output feature map, elempack 4
 * kernel      : pre-transformed kernel data for im2col_sgemm_pack4_neon
 * _bias       : per-output-channel bias (may be empty)
 * opt         : threading / allocator options
 *
 * Step 1 rewrites each input channel into a (size x 9) matrix where row k
 * holds the k-th tap of the 3x3 window for every output position; step 2
 * hands that matrix to the shared packed-GEMM kernel. */
static void conv3x3s2_im2col_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
// im2col
// one channel = (outw*outh) columns x 9 kernel-tap rows, float4 elements
Mat bottom_im2col(size, 9, inch, 16u, 4, opt.workspace_allocator);
{
// elements (not bytes) to skip at the end of each input row: stride 2
// consumes 2*outw input columns out of w, times elempack 4
const int gap = (w * 2 - outw * 2) * 4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
Mat out = bottom_im2col.channel(p);
// ptr0..ptr8 collect the 9 taps of the 3x3 window (row-major: r0..r2)
float* ptr0 = out.row(0);
float* ptr1 = out.row(1);
float* ptr2 = out.row(2);
float* ptr3 = out.row(3);
float* ptr4 = out.row(4);
float* ptr5 = out.row(5);
float* ptr6 = out.row(6);
float* ptr7 = out.row(7);
float* ptr8 = out.row(8);
const float* r0 = img.row(0);
const float* r1 = img.row(1);
const float* r2 = img.row(2);
for (int i = 0; i < outh; i++)
{
int j = 0;
// main loop: two output positions per iteration (5 float4 loads per row
// cover both stride-2 windows, which overlap in their middle column)
for (; j + 1 < outw; j += 2)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r14 = vld1q_f32(r1 + 16);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r24 = vld1q_f32(r2 + 16);
// tap k gets columns (k%3) of the two windows: offsets k and k+2
vst1q_f32(ptr0, _r00);
vst1q_f32(ptr0 + 4, _r02);
vst1q_f32(ptr1, _r01);
vst1q_f32(ptr1 + 4, _r03);
vst1q_f32(ptr2, _r02);
vst1q_f32(ptr2 + 4, _r04);
vst1q_f32(ptr3, _r10);
vst1q_f32(ptr3 + 4, _r12);
vst1q_f32(ptr4, _r11);
vst1q_f32(ptr4 + 4, _r13);
vst1q_f32(ptr5, _r12);
vst1q_f32(ptr5 + 4, _r14);
vst1q_f32(ptr6, _r20);
vst1q_f32(ptr6 + 4, _r22);
vst1q_f32(ptr7, _r21);
vst1q_f32(ptr7 + 4, _r23);
vst1q_f32(ptr8, _r22);
vst1q_f32(ptr8 + 4, _r24);
r0 += 16;
r1 += 16;
r2 += 16;
ptr0 += 8;
ptr1 += 8;
ptr2 += 8;
ptr3 += 8;
ptr4 += 8;
ptr5 += 8;
ptr6 += 8;
ptr7 += 8;
ptr8 += 8;
}
// remainder: one output position at a time
for (; j < outw; j++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r22 = vld1q_f32(r2 + 8);
vst1q_f32(ptr0, _r00);
vst1q_f32(ptr1, _r01);
vst1q_f32(ptr2, _r02);
vst1q_f32(ptr3, _r10);
vst1q_f32(ptr4, _r11);
vst1q_f32(ptr5, _r12);
vst1q_f32(ptr6, _r20);
vst1q_f32(ptr7, _r21);
vst1q_f32(ptr8, _r22);
r0 += 8;
r1 += 8;
r2 += 8;
ptr0 += 4;
ptr1 += 4;
ptr2 += 4;
ptr3 += 4;
ptr4 += 4;
ptr5 += 4;
ptr6 += 4;
ptr7 += 4;
ptr8 += 4;
}
// skip to the start of the next stride-2 input row pair
r0 += gap;
r1 += gap;
r2 += gap;
}
}
}
im2col_sgemm_pack4_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
aux.c | #include "aux.h"
int *stacks_list;
int *stacks_cnts;
int cnt, cnt2;
/* Wall-clock time in microseconds since the Unix epoch.
 * NOTE(review): the product may overflow a 32-bit long on platforms where
 * long is 32 bits; callers here only use differences, so this is benign
 * for short intervals. */
long usecs (){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
/* Busy-wait (spin) for approximately `sec` seconds.
 * Burns CPU on purpose: this benchmark harness simulates work, so an
 * actual sleep/yield would defeat the measurement. */
void mysleep(double sec){
    long t_begin = usecs();
    long t_now = 0;
    while (((double) t_now - t_begin) / 1000000 < sec)
        t_now = usecs();
    return;
}
/* Allocate `n` empty stacks plus the two global bookkeeping arrays:
 * stacks_list[k] = which stack element k is assigned to (uniform random),
 * stacks_cnts[k] = scratch counters (zeroed). Resets the global tickets
 * cnt/cnt2. Ownership: caller must release everything via free_stacks().
 * NOTE(review): malloc results are not checked, matching file style. */
void init_stacks(stack_t **stacks, int n){
    stack_t *arr;
    int k;
    arr = (stack_t*)malloc(n*sizeof(stack_t));
    for(k=0; k<n; k++){
        arr[k].cnt = 0;
        arr[k].elems = (int*)malloc(MAXELEMS*sizeof(int));
    }
    *stacks = arr;
    stacks_list = (int*)malloc(MAXELEMS*sizeof(int));
    stacks_cnts = (int*)malloc(MAXELEMS*sizeof(int));
    for(k=0; k<MAXELEMS; k++){
        stacks_cnts[k] = 0;
        stacks_list[k] = rand()%n;
    }
    cnt = 0;
    cnt2 = 0;
}
/* Release everything allocated by init_stacks: per-stack element arrays,
 * the stack array itself, and the global bookkeeping arrays. Resets the
 * global ticket counter cnt. */
void free_stacks(stack_t **stacks, int n){
    int k;
    for(k=0; k<n; k++){
        (*stacks)[k].cnt = 0;
        free((*stacks)[k].elems);
    }
    free(*stacks);
    free(stacks_list);
    free(stacks_cnts);
    cnt = 0;
}
/* Atomically take the next ticket from the global counter `cnt` and map
 * it to its pre-assigned stack id. Returns -1 once all MAXELEMS tickets
 * have been handed out. Thread-safe via OpenMP atomic capture. */
int get_random_stack(){
    int ticket;
#pragma omp atomic capture
    ticket = cnt++;
    return (ticket >= MAXELEMS) ? -1 : stacks_list[ticket];
}
/* Simulate a unit of work (100 us spin), then atomically fetch the next
 * sequence number from the global counter `cnt2`. */
int process(){
    int seq;
    mysleep(0.0001);
#pragma omp atomic capture
    seq = cnt2++;
    return seq;
}
/* Validate that the n stacks collectively hold exactly the MAXELEMS
 * elements that were distributed by init_stacks:
 *   1) each stack's element count matches the number of tickets assigned
 *      to it in stacks_list (stacks_cnts is reused as scratch space);
 *   2) every element id in [0, MAXELEMS) appears in some stack.
 * Prints the verdict on stdout; does not modify the stacks.
 * Changes vs. original: removed the large block of commented-out dead
 * code and added a malloc failure check. */
void check_result(stack_t *stacks, int n){
    int i, j;
    int *check;
    /* count check: start from the observed counts, subtract one per
       assigned ticket; every entry must come out exactly zero */
    for(i=0; i<n; i++)
        stacks_cnts[i] = stacks[i].cnt;
    for(i=0; i<MAXELEMS; i++)
        stacks_cnts[stacks_list[i]]--;
    for(i=0; i<n; i++){
        if(stacks_cnts[i] != 0){
            printf("The result is false\n");
            return;
        }
    }
    /* coverage check: mark each element id seen across all stacks */
    check = (int*)malloc(MAXELEMS*sizeof(int));
    if(check == NULL){
        /* cannot verify without the scratch array */
        printf("The result is false\n");
        return;
    }
    for(i=0; i<MAXELEMS; i++)
        check[i] = 0;
    for(i=0; i<n; i++)
        for(j=0; j<stacks[i].cnt; j++)
            check[stacks[i].elems[j]] = 1;
    for(i=0; i<MAXELEMS; i++)
        if(check[i] != 1){
            free(check);
            printf("The result is false\n");
            return;
        }
    free(check);
    printf("The result is correct!!!\n");
}
|
atomic-1.c | /* { dg-do compile } */
/* { dg-additional-options "-Wno-volatile" { target c++ } } */
int x;
volatile int y;
volatile unsigned char z;
/* Compile-only test: every update form of "#pragma omp atomic" that must
 * be accepted on a plain int (including operands read from a volatile).
 * The repeated "/= 3" line is present twice in the original test; kept. */
void f1(void)
{
#pragma omp atomic
x++;
#pragma omp atomic
x--;
#pragma omp atomic
++x;
#pragma omp atomic
--x;
#pragma omp atomic
x += 1;
#pragma omp atomic
x -= y;
#pragma omp atomic
x |= 1;
#pragma omp atomic
x &= 1;
#pragma omp atomic
x ^= 1;
#pragma omp atomic
x *= 3;
#pragma omp atomic
x /= 3;
#pragma omp atomic
x /= 3;
#pragma omp atomic
x <<= 3;
#pragma omp atomic
x >>= 3;
}
/* Compile-only test: same atomic update forms as f1, but with a volatile
 * int as the updated object. The duplicate "/= 3" mirrors f1. */
void f2(void)
{
#pragma omp atomic
y++;
#pragma omp atomic
y--;
#pragma omp atomic
++y;
#pragma omp atomic
--y;
#pragma omp atomic
y += 1;
#pragma omp atomic
y -= x;
#pragma omp atomic
y |= 1;
#pragma omp atomic
y &= 1;
#pragma omp atomic
y ^= 1;
#pragma omp atomic
y *= 3;
#pragma omp atomic
y /= 3;
#pragma omp atomic
y /= 3;
#pragma omp atomic
y <<= 3;
#pragma omp atomic
y >>= 3;
}
/* Compile-only test: atomic update forms on a volatile unsigned char —
 * exercises integer promotion of a sub-int atomic operand. Unlike f1/f2
 * there is no "-=" line here. */
void f3(void)
{
#pragma omp atomic
z++;
#pragma omp atomic
z--;
#pragma omp atomic
++z;
#pragma omp atomic
--z;
#pragma omp atomic
z += 1;
#pragma omp atomic
z |= 1;
#pragma omp atomic
z &= 1;
#pragma omp atomic
z ^= 1;
#pragma omp atomic
z *= 3;
#pragma omp atomic
z /= 3;
#pragma omp atomic
z /= 3;
#pragma omp atomic
z <<= 3;
#pragma omp atomic
z >>= 3;
}
|
mipmap_core.c | #include <math.h>
#include <stdlib.h>
#include <memory.h>
#include <stdio.h>
#include "omp.h"
/* C-OMP implementation of FGP-TV [1] denoising/regularization model (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume [REQUIRED]
* 2. lambda - regularization parameter [REQUIRED]
* 3. Number of iterations [OPTIONAL parameter]
* 4. eplsilon: tolerance constant [OPTIONAL parameter]
* 5. TV-type: 'iso' or 'l1' [OPTIONAL parameter]
* 6. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL parameter]
* 7. print information: 0 (off) or 1 (on) [OPTIONAL parameter]
* 8. P1 (dual variable from the previous outer iteration) [OPTIONAL parameter]
* 9. P2 (dual variable from the previous outer iteration) [OPTIONAL parameter]
*
* Output:
* [1] Filtered/regularized image
* [2] last function value
* [3] P1 (dual variable from the previous outer iteration) [if 8 is provided]
* [4] P2 (dual variable from the previous outer iteration) [if 9 is provided]
*
* Example of image denoising:
* figure;
* Im = double(imread('lena_gray_256.tif'))/255; % loading image
* u0 = Im + .05*randn(size(Im)); % adding noise
* u = FGP_TV(single(u0), 0.05, 100, 1e-04);
*
* to compile with OMP support: gcc -shared -Wall -std=c99 -Wl,-soname,FGP_TV -fopenmp -o FGP_TV.so -fPIC FGP_TV.c
* This function is based on the Matlab's code and paper by
* [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems"
*
* D. Kazantsev, 2016-17
*
*/
float copyIm(float *A, float *B, int dimX, int dimY, int dimZ);
float Obj_func2D(float *A, float *D, float *R1, float *R2, float lambda, int dimX, int dimY);
float Grad_func2D(float *P1, float *P2, float *D, float *R1, float *R2, float lambda, int dimX, int dimY);
float Proj_func2D(float *P1, float *P2, int methTV, int dimX, int dimY);
float Rupd_func2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, int dimX, int dimY);
float Obj_func_CALC2D(float *A, float *D, float *funcvalA, float lambda, int dimX, int dimY);
/* Fast Gradient Projection TV denoising (Beck & Teboulle, FISTA-style).
 *
 * A       : noisy input image (read-only)
 * lambda  : regularization weight
 * iter    : maximum number of outer iterations
 * epsil   : relative-change tolerance; stop after it holds 5 times
 * methTV  : 0 = isotropic TV, otherwise anisotropic (l1) TV
 * nonneg  : 1 = clamp the result to be non-negative
 * printM  : verbosity flag (currently only used by commented-out code)
 * dimX/Y/Z: image dimensions
 * D       : output buffer (caller-allocated), receives the denoised image
 *
 * NOTE(review): the work buffers (D_old, P*, R*) are allocated with
 * dimX*dimY elements, but the norm/non-negativity loops and copyIm run
 * over dimX*dimY*dimZ — this is only safe when dimZ == 1 (2D case);
 * confirm callers never pass dimZ > 1. */
void FGP_TV(float *A, float lambda, int iter, float epsil, int methTV, int nonneg, int printM, int dimX, int dimY, int dimZ, float *D)
{
int ll, j, count;
float *D_old=NULL, *P1=NULL, *P2=NULL, *P1_old=NULL, *P2_old=NULL, *R1=NULL, *R2=NULL, tk, tkp1, re, re1;
//A = (float *) mxGetData(prhs[0]); /*noisy image (2D/3D) */
//lambda = (float) mxGetScalar(prhs[1]); /* regularization parameters */
//iter = 100; /* default iterations number */
//epsil = 0.0001; /* default tolerance constant */
//methTV = 0; /* default isotropic TV penalty */
//nonneg = 0; /* nonnegativity (0 is OFF by default) */
//printM = 0; /* print information (0 is 0FF by default) */
/*output function value (last iteration) */
// plhs[1] = mxCreateNumericMatrix(1, 1, mxSINGLE_CLASS, mxREAL);
//float *funcvalA = (float *) mxGetData(plhs[1]);
// if (mxGetClassID(prhs[0]) != mxSINGLE_CLASS) {mexErrMsgTxt("The input image must be in a single precision"); }
/* Handling Matlab output data*/
// dimX = dim_array[0]; dimY = dim_array[1]; dimZ = dim_array[2];
/* FISTA momentum parameters */
tk = 1.0f;
tkp1=1.0f;
count = 0;
// re_old = 0.0f;
/* primal iterate copy, dual variables P, and extrapolated duals R */
D_old = (float*) calloc (dimY*dimX,sizeof(float));
P1 = (float*) calloc (dimY*dimX,sizeof(float));
P2 = (float*) calloc (dimY*dimX,sizeof(float));
P1_old = (float*) calloc (dimY*dimX,sizeof(float));
P2_old = (float*) calloc (dimY*dimX,sizeof(float));
R1 = (float*) calloc (dimY*dimX,sizeof(float));
R2 = (float*) calloc (dimY*dimX,sizeof(float));
/* begin iterations */
for(ll=0; ll<iter; ll++) {
/* computing the gradient of the objective function */
Obj_func2D(A, D, R1, R2, lambda, dimX, dimY);
if (nonneg == 1) {
/* apply nonnegativity */
for(j=0; j<dimX*dimY*dimZ; j++) {if (D[j] < 0.0f) D[j] = 0.0f;}
}
/*Taking a step towards minus of the gradient*/
Grad_func2D(P1, P2, D, R1, R2, lambda, dimX, dimY);
/* projection step */
Proj_func2D(P1, P2, methTV, dimX, dimY);
/*updating R and t*/
tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f;
Rupd_func2D(P1, P1_old, P2, P2_old, R1, R2, tkp1, tk, dimX, dimY);
/* calculate norm */
/* relative change ||D - D_old|| / ||D|| used as the stopping test */
re = 0.0f; re1 = 0.0f;
for(j=0; j<dimX*dimY*dimZ; j++)
{
re += pow(D[j] - D_old[j],2);
re1 += pow(D[j],2);
}
re = sqrt(re)/sqrt(re1);
/* stop once the tolerance has been met 5 times (count > 4) */
if (re < epsil) count++;
if (count > 4) {
// Obj_func_CALC2D(A, D, funcvalA, lambda, dimX, dimY);
break; }
/* check that the residual norm is decreasing */
// if (ll > 2) {
// if (re > re_old) {
// Obj_func_CALC2D(A, D, funcvalA, lambda, dimX, dimY);
// break; }}
//re_old = re;
/*printf("%f %i %i \n", re, ll, count); */
/*storing old values*/
copyIm(D, D_old, dimX, dimY, dimZ);
copyIm(P1, P1_old, dimX, dimY, dimZ);
copyIm(P2, P2_old, dimX, dimY, dimZ);
tk = tkp1;
/* calculating the objective function value */
//if (ll == (iter-1)) Obj_func_CALC2D(A, D, funcvalA, lambda, dimX, dimY);
}
if (nonneg == 1) {
/* apply nonnegativity */
for(j=0; j<dimX*dimY*dimZ; j++) {if (D[j] < 0.0f) D[j] = 0.0f;}
}
// if (printM == 1) printf("FGP-TV iterations stopped at iteration %i with the function value %f \n", ll, funcvalA[0]);
free(D_old);free(P1);free(P2);free(R1);free(R2);free(P1_old);free(P2_old);
}
float Obj_func_CALC2D(float *A, float *D, float *funcvalA, float lambda, int dimX, int dimY)
{
int i,j;
float f1, f2, val1, val2;
/*data-related term */
f1 = 0.0f;
for(i=0; i<dimX*dimY; i++) f1 += pow(D[i] - A[i],2);
/*TV-related term */
f2 = 0.0f;
for(i=0; i<dimX; i++) {
for(j=0; j<dimY; j++) {
/* boundary conditions */
if (i == dimX-1) {val1 = 0.0f;} else {val1 = A[(i+1)*dimY + (j)] - A[(i)*dimY + (j)];}
if (j == dimY-1) {val2 = 0.0f;} else {val2 = A[(i)*dimY + (j+1)] - A[(i)*dimY + (j)];}
f2 += sqrt(pow(val1,2) + pow(val2,2));
}}
/* sum of two terms */
funcvalA[0] = 0.5f*f1 + lambda*f2;
return *funcvalA;
}
/* Primal update: D = A - lambda * div(R), where the discrete divergence
 * at (x,y) uses backward differences of the dual fields R1 (rows) and
 * R2 (columns), taken as zero on the first row/column. Returns D[0]. */
float Obj_func2D(float *A, float *D, float *R1, float *R2, float lambda, int dimX, int dimY)
{
    int x, y;
    float up, left;
#pragma omp parallel for shared(A,D,R1,R2) private(x,y,up,left)
    for (x = 0; x < dimX; x++) {
        for (y = 0; y < dimY; y++) {
            up   = (x == 0) ? 0.0f : R1[(x - 1)*dimY + (y)];
            left = (y == 0) ? 0.0f : R2[(x)*dimY + (y - 1)];
            D[(x)*dimY + (y)] = A[(x)*dimY + (y)] - lambda*(R1[(x)*dimY + (y)] + R2[(x)*dimY + (y)] - up - left);
        }
    }
    return *D;
}
/* Dual ascent step: P = R + (1/(8*lambda)) * grad(D), where grad uses
 * forward differences with zero flux at the last row/column. The 1/8
 * factor is the standard FGP step size for the 2D TV operator. */
float Grad_func2D(float *P1, float *P2, float *D, float *R1, float *R2, float lambda, int dimX, int dimY)
{
    int x, y;
    float gx, gy, step;
    step = (1.0f / (8.0f*lambda));
#pragma omp parallel for shared(P1,P2,D,R1,R2,step) private(x,y,gx,gy)
    for (x = 0; x < dimX; x++) {
        for (y = 0; y < dimY; y++) {
            gx = (x == dimX - 1) ? 0.0f : D[(x)*dimY + (y)] - D[(x + 1)*dimY + (y)];
            gy = (y == dimY - 1) ? 0.0f : D[(x)*dimY + (y)] - D[(x)*dimY + (y + 1)];
            P1[(x)*dimY + (y)] = R1[(x)*dimY + (y)] + step*gx;
            P2[(x)*dimY + (y)] = R2[(x)*dimY + (y)] + step*gy;
        }
    }
    return 1;
}
float Proj_func2D(float *P1, float *P2, int methTV, int dimX, int dimY)
{
float val1, val2, denom;
int i, j;
if (methTV == 0) {
/* isotropic TV*/
#pragma omp parallel for shared(P1,P2) private(i,j,denom)
for (i = 0; i<dimX; i++) {
for (j = 0; j<dimY; j++) {
denom = pow(P1[(i)*dimY + (j)], 2) + pow(P2[(i)*dimY + (j)], 2);
if (denom > 1) {
P1[(i)*dimY + (j)] = P1[(i)*dimY + (j)] / sqrt(denom);
P2[(i)*dimY + (j)] = P2[(i)*dimY + (j)] / sqrt(denom);
}
}
}
}
else {
/* anisotropic TV*/
#pragma omp parallel for shared(P1,P2) private(i,j,val1,val2)
for (i = 0; i<dimX; i++) {
for (j = 0; j<dimY; j++) {
val1 = fabs(P1[(i)*dimY + (j)]);
val2 = fabs(P2[(i)*dimY + (j)]);
if (val1 < 1.0f) { val1 = 1.0f; }
if (val2 < 1.0f) { val2 = 1.0f; }
P1[(i)*dimY + (j)] = P1[(i)*dimY + (j)] / val1;
P2[(i)*dimY + (j)] = P2[(i)*dimY + (j)] / val2;
}
}
}
return 1;
}
/* FISTA momentum/extrapolation step:
 * R = P + ((tk - 1)/tkp1) * (P - P_old) for both dual components. */
float Rupd_func2D(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, int dimX, int dimY)
{
    int x, y;
    float theta;
    theta = ((tk - 1.0f) / tkp1);
#pragma omp parallel for shared(P1,P2,P1_old,P2_old,R1,R2,theta) private(x,y)
    for (x = 0; x < dimX; x++) {
        for (y = 0; y < dimY; y++) {
            int k = (x)*dimY + (y);
            R1[k] = P1[k] + theta*(P1[k] - P1_old[k]);
            R2[k] = P2[k] + theta*(P2[k] - P2_old[k]);
        }
    }
    return 1;
}
/* General Functions */
/*****************************************************************/
/* Copy Image */
/* Element-wise copy of image A into caller-allocated buffer B
 * (dimX*dimY*dimZ floats). Returns B[0]. */
float copyIm(float *A, float *B, int dimX, int dimY, int dimZ)
{
    int idx;
#pragma omp parallel for shared(A, B) private(idx)
    for (idx = 0; idx < dimX*dimY*dimZ; idx++)
        B[idx] = A[idx];
    return *B;
}
|
168. LU Decompose.c | /**
* \file
* LU decomposition
* square matrix
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Perform LU decomposition on matrix
* \param[in] A matrix to decompose
* \param[out] L output L matrix
* \param[out] U output U matrix
* \param[in] mat_size input square matrix size
*/
/** Perform Doolittle LU decomposition (A = L*U, unit diagonal on L).
 *
 * Fix vs. original: the loops used orphaned `#pragma omp for` directives.
 * Outside a parallel region those do nothing (the code ran sequentially),
 * and if the function were ever called from inside a parallel region the
 * file-scope `col`/`j` variables would have been shared across threads —
 * a data race. Each inner loop is now a self-contained `parallel for`
 * with loop-local variables; iterations within a row are independent, so
 * this is safe. The outer row loop stays sequential because row r depends
 * on rows 0..r-1.
 *
 * No pivoting is performed: a zero on U's diagonal divides by zero.
 *
 * \param[in] A matrix to decompose
 * \param[out] L output L matrix (lower triangle written; upper assumed 0)
 * \param[out] U output U matrix (upper triangle written; lower assumed 0)
 * \param[in] mat_size input square matrix size
 * \returns 0 always
 */
int lu_decomposition(double **A, double **L, double **U, int mat_size)
{
    for (int row = 0; row < mat_size; row++)
    {
        /* Upper triangular row: U[row][col] = A[row][col] - sum L[row][j]*U[j][col] */
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int col = row; col < mat_size; col++)
        {
            double lu_sum = 0.;
            for (int j = 0; j < row; j++) lu_sum += L[row][j] * U[j][col];
            U[row][col] = A[row][col] - lu_sum;
        }
        /* Lower triangular column: L[col][row] = (A[col][row] - sum) / U[row][row] */
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (int col = row; col < mat_size; col++)
        {
            if (row == col)
            {
                L[row][col] = 1.;  /* unit diagonal (Doolittle convention) */
                continue;
            }
            double lu_sum = 0.;
            for (int j = 0; j < row; j++) lu_sum += L[col][j] * U[j][row];
            L[col][row] = (A[col][row] - lu_sum) / U[row][row];
        }
    }
    return 0;
}
/** Function to display square matrix */
void display(double **A, int N)
{
for (int i = 0; i < N; i++)
{
for (int j = 0; j < N; j++)
{
printf("% 3.3g \t", A[i][j]);
}
putchar('\n');
}
}
/** Main function: build a random square matrix (size from argv[1], default
 * 3), LU-decompose it, print A, L, U, and free everything. */
int main(int argc, char **argv)
{
int mat_size = 3; // default matrix size
const int range = 10;
const int range2 = range >> 1;
if (argc == 2)
mat_size = atoi(argv[1]);
srand(time(NULL)); // random number initializer
/* Create a square matrix with random values */
double **A = (double **)malloc(mat_size * sizeof(double *));
double **L = (double **)malloc(mat_size * sizeof(double *)); // output
double **U = (double **)malloc(mat_size * sizeof(double *)); // output
for (int i = 0; i < mat_size; i++)
{
// calloc so that all valeus are '0' by default
A[i] = (double *)calloc(mat_size, sizeof(double));
L[i] = (double *)calloc(mat_size, sizeof(double));
U[i] = (double *)calloc(mat_size, sizeof(double));
for (int j = 0; j < mat_size; j++)
/* create random values in the limits [-range2, range-1] */
A[i][j] = (double)(rand() % range - range2);
}
lu_decomposition(A, L, U, mat_size);
printf("A = \n");
display(A, mat_size);
printf("\nL = \n");
display(L, mat_size);
printf("\nU = \n");
display(U, mat_size);
/* Free dynamically allocated memory */
for (int i = 0; i < mat_size; i++)
{
free(A[i]);
free(L[i]);
free(U[i]);
}
free(A);
free(L);
free(U);
return 0;
}
makeYbus.h | /*
* makYbus.cuh
*
* Created on: 23/09/2015
* Author: Igor M. Araújo
*/
#ifndef MAKEYBUS_CUH_
#define MAKEYBUS_CUH_
#include <util/complexUtils.h>
#include <util/helper_cuda.h>
#include <iostream>
using namespace std;
/* Build the branch-to-bus connection matrices Cf ("from" side) and
 * Ct ("to" side) in COO format: entry k is 1+0i at row = from/to bus of
 * branch k, column = k (both shifted by BASE_INDEX for 0/1-based MKL). */
__host__ void mkl_computeCfCt(
    Branch *branches,
    cuDoubleComplex *cooValCf,
    int *cooRowCf,
    int *cooColCf,
    cuDoubleComplex *cooValCt,
    int *cooRowCt,
    int *cooColCt)
{
#pragma omp parallel for
    for (int k = 0; k < H_NBRANCH; k++)
    {
        Branch br = branches[k];
        const int col = k + BASE_INDEX;
        cooValCf[k] = make_cuDoubleComplex(1, 0);
        cooRowCf[k] = br.from + BASE_INDEX;
        cooColCf[k] = col;
        cooValCt[k] = make_cuDoubleComplex(1, 0);
        cooRowCt[k] = br.to + BASE_INDEX;
        cooColCt[k] = col;
    }
}
/* Build the branch admittance matrices Yf/Yt (CSR, 2 entries per branch
 * row) and the shunt admittance diagonal Ysh, applying particle-swarm
 * decision variables (shunt values, transformer taps) where configured.
 *
 * Fix vs. original: the last iteration used to do `id++` inside the
 * `#pragma omp parallel for` body to append the trailing CSR row-pointer
 * entries. Modifying the OpenMP loop iteration variable inside the loop
 * is prohibited by the OpenMP specification (undefined behavior). The
 * trailing row pointers are now written once, after the parallel loop,
 * producing the same arrays. */
__host__ void mkl_computeYfYt(
    Bus *buses,
    Branch *branches,
    cuDoubleComplex *csrValYt,
    int *csrRowPtrYt,
    int *csrColIndYt,
    cuDoubleComplex *csrValYf,
    int *csrRowPtrYf,
    int *csrColIndYf,
    cuDoubleComplex *csrValYsh,
    int *csrRowPtrYsh,
    int *csrColIndYsh,
    vector<pso::Particula::Estrutura> estrutura,
    pso::Particula particula) {
#pragma omp parallel for
    for (int id = 0; id < H_NBRANCH; id++) {
        if (id < H_NBUS) {
            /* shunt diagonal: take Bsh from the particle if this bus's
               shunt is a PSO decision variable, otherwise the case data */
            Bus l_bus = buses[id];
            double Bsh = (l_bus.indiceEstrutura != -1 && estrutura[l_bus.indiceEstrutura].tipo == pso::Particula::Estrutura::SHC) ? particula[l_bus.indiceEstrutura] : l_bus.Bsh;
            csrValYsh[id] = make_cuDoubleComplex(l_bus.Gsh, Bsh);
            csrRowPtrYsh[id] = id + BASE_INDEX;
            csrColIndYsh[id] = id + BASE_INDEX;
        }
        cuDoubleComplex Ytt;
        cuDoubleComplex Yff;
        cuDoubleComplex Yft;
        cuDoubleComplex Ytf;
        Branch l_branch = branches[id];
        /* out-of-service branches contribute zero admittance */
        int stat = (l_branch.inservice) ? 1 : 0;
        cuDoubleComplex impedance = make_cuDoubleComplex(l_branch.R, l_branch.X);
        cuDoubleComplex Ys = cuCdiv(make_cuDoubleComplex(stat, 0), impedance);
        cuDoubleComplex susceptance = make_cuDoubleComplex(0, l_branch.B);
        cuDoubleComplex Bc = cuCmul(make_cuDoubleComplex(stat, 0), susceptance);
        /* tap ratio from the particle for controllable transformers;
           NOTE(review): assumes indiceEstrutura is valid whenever tap != 0
           — confirm against the case loader */
        cuDoubleComplex tap = (l_branch.tap != 0) ? make_cuDoubleComplex(particula[l_branch.indiceEstrutura], 0) : make_cuDoubleComplex(1, 0);
        cuDoubleComplex phase_shifter = make_cuDoubleComplex(0, M_PI / 180.0 * l_branch.shift);
        tap = cuCmul(tap, cuCexp(phase_shifter));
        /* standard two-port pi-model admittances */
        Ytt = cuCadd(Ys, cuCdiv(Bc, make_cuDoubleComplex(2, 0)));
        Yff = cuCdiv(Ytt, cuCmul(tap, cuConj(tap)));
        Yft = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), cuConj(tap));
        Ytf = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), tap);
        /* each branch row holds exactly 2 entries; keep column indices
           sorted by placing the smaller bus index first */
        int offsetTo, offsetFrom;
        offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
        offsetFrom = 1 - offsetTo;
        csrRowPtrYf[id] = id * 2 + BASE_INDEX;
        csrColIndYf[id * 2 + offsetTo] = l_branch.to + BASE_INDEX;
        csrValYf[id * 2 + offsetTo] = Yft;
        csrColIndYf[id * 2 + offsetFrom] = l_branch.from + BASE_INDEX;
        csrValYf[id * 2 + offsetFrom] = Yff;
        csrRowPtrYt[id] = id * 2 + BASE_INDEX;
        csrColIndYt[id * 2 + offsetTo] = l_branch.to + BASE_INDEX;
        csrValYt[id * 2 + offsetTo] = Ytt;
        csrColIndYt[id * 2 + offsetFrom] = l_branch.from + BASE_INDEX;
        csrValYt[id * 2 + offsetFrom] = Ytf;
    }
    /* CSR arrays need one past-the-end row pointer; write them here
       instead of mutating the loop variable inside the parallel loop */
    csrRowPtrYf[H_NBRANCH] = H_NBRANCH * 2 + BASE_INDEX;
    csrRowPtrYt[H_NBRANCH] = H_NBRANCH * 2 + BASE_INDEX;
    csrRowPtrYsh[H_NBUS] = H_NBUS + BASE_INDEX;
}
/* autor: Igor Araújo
* Date : 03/02/2016
* Description: Compute Admittance Matrix using a hybrid approach CPU and GPU, with cuSparse library.
* */
/* Builds the bus admittance matrix Ybus = Cf * Yf + Ct * Yt + Ysh on the host
 * using Intel MKL sparse routines (mkl_zcsrcoo, mkl_zcsrmultcsr, mkl_zcsradd).
 * Cf/Ct are the branch "from"/"to" connection matrices (built once, COO ->
 * CSR); Yf/Yt/Ysh are assembled by mkl_computeYfYt for the given particle.
 * All matrix buffers referenced here (cooVal*, csrVal*, nnz*, ...) are
 * file-scope globals; the result is left in csrValYbus/csrRowPtrYbus/
 * csrColIndYbus. Exits the process on any MKL error. */
__host__ void mkl_makeYbus( vector<pso::Particula::Estrutura> estrutura, pso::Particula particula,
Bus* buses, Branch* branches )
{
// #1 Matrix Cf and Ct is the same to All tests, so compute only once in the first time.
// #1.1 Compute Matrix Cf and Ct in Coordinate Format (COO).
mkl_computeCfCt(
branches,
cooValCf,
cooRowCf,
cooColCf,
cooValCt,
cooRowCt,
cooColCt);
// #1.2 Sort Matrix Cf by ROW
// #1.3 Convert Matrix Cf in Coordinate Format(COO) to Compressed Sparse Row Format(CSR)
int job[6];
/*job - Array, contains the following conversion parameters:
job[0]
If job[0]=0, the matrix in the CSR format is converted to the coordinate format;
if job[0]=1, the matrix in the coordinate format is converted to the CSR format.
if job[0]=2, the matrix in the coordinate format is converted to the CSR format, and the column indices in CSR representation are sorted in the increasing order within each row.
job[1]
If job[1]=0, zero-based indexing for the matrix in CSR format is used;
if job[1]=1, one-based indexing for the matrix in CSR format is used.
job[2]
If job[2]=0, zero-based indexing for the matrix in coordinate format is used;
if job[2]=1, one-based indexing for the matrix in coordinate format is used.
job[4]
job[4]=nzmax - maximum number of the non-zero elements allowed if job[0]=0.
job[5] - job indicator.
For conversion to the coordinate format:
If job[5]=1, only array rowind is filled in for the output storage.
If job[5]=2, arrays rowind, colind are filled in for the output storage.
If job[5]=3, all arrays rowind, colind, acoo are filled in for the output storage.
For conversion to the CSR format:
If job[5]=0, all arrays acsr, ja, ia are filled in for the output storage.
If job[5]=1, only array ia is filled in for the output storage.
If job[5]=2, then it is assumed that the routine already has been called with the job[5]=1, and the user allocated the required space for storing the output arrays acsr and ja.
*/
// job[0]=2: COO -> CSR with column indices sorted within each row (covers #1.2 and #1.3).
// job[3] is unused for COO<->CSR conversions per the MKL documentation, so it is left unset.
job[0] = 2;
job[1] = BASE_INDEX;
job[2] = BASE_INDEX;
job[4] = nnzCf;
// job[5]=0: fill all output arrays (values, column indices, row pointers).
job[5] = 0;
int info;
MKL_ZCSRCOO((const int*) &job,(const int*) &H_NBUS, csrValCf, csrColIndCf,csrRowPtrCf, &nnzCf,cooValCf, cooRowCf, cooColCf, &info);
if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// #1.4 Sort Matrix Ct by ROW
// #1.5 Convert Matrix Ct in Coordinate Format(COO) to Compressed Sparse Row Format(CSR)
job[0] = 2;
job[1] = BASE_INDEX;
job[2] = BASE_INDEX;
job[4] = nnzCt;
job[5] = 0;
MKL_ZCSRCOO((const int*) &job,(const int*) &H_NBUS, csrValCt, csrColIndCt,csrRowPtrCt, &nnzCt,cooValCt, cooRowCt, cooColCt, &info);
if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// #2 Compute Matrix Yf and Yt
mkl_computeYfYt(
buses,
branches,
csrValYt,
csrRowPtrYt,
csrColIndYt,
csrValYf,
csrRowPtrYf,
csrColIndYf,
csrValYsh,
csrRowPtrYsh,
csrColIndYsh,
estrutura,
particula);
// #3 Compute Admittance Matrix(Ybus) by equation Ybus = Cf * Yf + Ct * Yt + Ysh
// #3.1 Compute Cf * Yf from equation
// MKL two-phase protocol: request=1 computes only the output row pointers so
// the result size can be read; the caller then allocates and calls request=2
// to fill the column indices and values.
{
const char transa = 'N';const int request = 1;const int sort = 0;const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;const int nnz = 0;
MKL_ZCSRMULTCSR( &transa, &request, &sort, &m, &n, &k, csrValCf, csrColIndCf, csrRowPtrCf, csrValYf, csrColIndYf, csrRowPtrYf,csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
// Last row pointer minus one gives the nnz count.
// NOTE(review): this assumes one-based CSR row pointers — confirm BASE_INDEX is 1.
nnzCfYf = csrRowPtrCfYf[m] - 1;
csrColIndCfYf = (int*) MKL_malloc(sizeof(int) * nnzCfYf, 64);
csrValCfYf = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzCfYf, 64);
}
{
const char transa = 'N';const int request = 2;const int sort = 0;const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;const int nnz = nnzCfYf;
MKL_ZCSRMULTCSR( &transa, &request, &sort, &m, &n, &k, csrValCf, csrColIndCf, csrRowPtrCf, csrValYf, csrColIndYf, csrRowPtrYf,csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// NOTE(review): this request=0 call repeats the full multiplication the
// request=1/request=2 pair above already performed — it looks redundant;
// verify against the mkl_zcsrmultcsr documentation before removing.
{
const char transa = 'N';const int request = 0;const int sort = 0;const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;const int nnz = nnzCfYf;
MKL_ZCSRMULTCSR( &transa, &request, &sort, &m, &n, &k, csrValCf, csrColIndCf, csrRowPtrCf, csrValYf, csrColIndYf, csrRowPtrYf,csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// #3.2 Compute Ct * Yt from equation (same request=1 / request=2 protocol).
{
const char transa = 'N';const int request = 1;const int sort = 0;const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;const int nnz = nnzCtYt;
MKL_ZCSRMULTCSR( &transa, &request, &sort, &m, &n, &k, csrValCt, csrColIndCt, csrRowPtrCt, csrValYt, csrColIndYt, csrRowPtrYt,csrValCtYt, csrColIndCtYt, csrRowPtrCtYt, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
nnzCtYt = csrRowPtrCtYt[m] - 1;
csrColIndCtYt = (int*) MKL_malloc(sizeof(int) * nnzCtYt, 64);
csrValCtYt = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzCtYt, 64);
}
{
const char transa = 'N';const int request = 2;const int sort = 0;const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;const int nnz = nnzCtYt;
MKL_ZCSRMULTCSR( &transa, &request, &sort, &m, &n, &k, csrValCt, csrColIndCt, csrRowPtrCt, csrValYt, csrColIndYt, csrRowPtrYt,csrValCtYt, csrColIndCtYt, csrRowPtrCtYt, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// NOTE(review): redundant-looking request=0 call, same as in #3.1.
{
const char transa = 'N';const int request = 0;const int sort = 0;const int m = H_NBUS; const int n = H_NBRANCH; const int k = H_NBUS;const int nnz = nnzCtYt;
MKL_ZCSRMULTCSR( &transa, &request, &sort, &m, &n, &k, csrValCt, csrColIndCt, csrRowPtrCt, csrValYt, csrColIndYt, csrRowPtrYt,csrValCtYt, csrColIndCtYt, csrRowPtrCtYt, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// #3.3 Compute CfYf + CtYt from equation (scalar 1+0i, i.e. a plain sum).
cuDoubleComplex scalar = make_cuDoubleComplex(1.0,0);
{
const char transa = 'N';const int request = 1;const int sort = 0;const int m = H_NBUS; const int n = H_NBUS;const int nnz = nnzCfYfCtYt;
MKL_ZCSRADD( &transa, &request, &sort, &m, &n, csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &scalar, csrValCtYt, csrColIndCtYt, csrRowPtrCtYt, csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
nnzCfYfCtYt = csrRowPtrCfYfCtYt[m] - 1;
csrColIndCfYfCtYt = (int*) MKL_malloc(sizeof(int) * nnzCfYfCtYt, 64);
csrValCfYfCtYt = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzCfYfCtYt, 64);
}
{
const char transa = 'N';const int request = 2;const int sort = 0;const int m = H_NBUS; const int n = H_NBUS;const int nnz = nnzCfYfCtYt;
MKL_ZCSRADD( &transa, &request, &sort, &m, &n, csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &scalar, csrValCtYt, csrColIndCtYt, csrRowPtrCtYt, csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// NOTE(review): redundant-looking request=0 call, same as in #3.1.
{
const char transa = 'N';const int request = 0;const int sort = 0;const int m = H_NBUS; const int n = H_NBUS;const int nnz = nnzCfYfCtYt;
MKL_ZCSRADD( &transa, &request, &sort, &m, &n, csrValCfYf, csrColIndCfYf, csrRowPtrCfYf, &scalar, csrValCtYt, csrColIndCtYt, csrRowPtrCtYt, csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// #3.4 Compute CfYfCtYt + Ysh from equation — final Ybus.
{
const char transa = 'N';const int request = 1;const int sort = 0;const int m = H_NBUS; const int n = H_NBUS;const int nnz = nnzYbus;
MKL_ZCSRADD( &transa, &request, &sort, &m, &n, csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &scalar, csrValYsh, csrColIndYsh, csrRowPtrYsh, csrValYbus, csrColIndYbus, csrRowPtrYbus, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
nnzYbus = csrRowPtrYbus[m] - 1;
csrColIndYbus = (int*) MKL_malloc(sizeof(int) * nnzYbus, 64);
csrValYbus = (cuDoubleComplex*) MKL_malloc(sizeof(cuDoubleComplex) * nnzYbus, 64);
}
{
const char transa = 'N';const int request = 2;const int sort = 0;const int m = H_NBUS; const int n = H_NBUS;const int nnz = nnzYbus;
MKL_ZCSRADD( &transa, &request, &sort, &m, &n, csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &scalar, csrValYsh, csrColIndYsh, csrRowPtrYsh, csrValYbus, csrColIndYbus, csrRowPtrYbus, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// NOTE(review): redundant-looking request=0 call, same as in #3.1.
{
const char transa = 'N';const int request = 0;const int sort = 0;const int m = H_NBUS; const int n = H_NBUS;const int nnz = nnzYbus;
MKL_ZCSRADD( &transa, &request, &sort, &m, &n, csrValCfYfCtYt, csrColIndCfYfCtYt, csrRowPtrCfYfCtYt, &scalar, csrValYsh, csrColIndYsh, csrRowPtrYsh, csrValYbus, csrColIndYbus, csrRowPtrYbus, &nnz, &info);if(info) {printf("MKL Library error in %s at %d", __FILE__, __LINE__);exit(1);}
}
// Release the intermediate products; the Ybus arrays stay allocated for the caller.
MKL_free(csrColIndCfYf);
MKL_free(csrValCfYf);
MKL_free(csrColIndCtYt);
MKL_free(csrValCtYt);
MKL_free(csrColIndCfYfCtYt);
MKL_free(csrValCfYfCtYt);
}
/* Kernel: one thread per branch. Emits the branch connection matrices in COO
 * form: Cf has a 1 at (branch.from, branch) and Ct has a 1 at
 * (branch.to, branch). Threads beyond D_NBRANCH do nothing. */
__global__ void hybrid_computeCfCt(
Branch *branches,
cuDoubleComplex *cooValCf,
int *cooRowCf,
int *cooColCf,
cuDoubleComplex *cooValCt,
int *cooRowCt,
int *cooColCt)
{
const int tid = ID();
if (tid >= D_NBRANCH)
return;
const Branch br = branches[tid];
const cuDoubleComplex one = make_cuDoubleComplex(1, 0);
/* "From" connection matrix Cf: row = sending bus, column = branch index. */
cooValCf[tid] = one;
cooRowCf[tid] = br.from;
cooColCf[tid] = tid;
/* "To" connection matrix Ct: row = receiving bus, column = branch index. */
cooValCt[tid] = one;
cooRowCt[tid] = br.to;
cooColCt[tid] = tid;
}
/* Kernel: one thread per branch. Builds the branch admittance matrices Yf and
 * Yt in CSR form (exactly two entries per row: the "from" and "to" columns),
 * and — using the first D_NBUS threads — the diagonal shunt matrix Ysh.
 * d_estrutura/d_enxame carry the PSO particle: optimized shunt susceptances
 * and transformer taps override the static case data.
 * NOTE(review): the Ysh branch assumes D_NBRANCH >= D_NBUS so that every bus
 * gets a thread — confirm this holds for all test systems. */
__global__ void hybrid_computeYfYt(Bus *buses, Branch *branches,
cuDoubleComplex *csrValYt, int *csrRowPtrYt, int *csrColIndYt,
cuDoubleComplex *csrValYf, int *csrRowPtrYf, int *csrColIndYf,
cuDoubleComplex *csrValYsh, int *csrRowPtrYsh, int *csrColIndYsh,
pso::Particula::Estrutura *d_estrutura, double *d_enxame) {
int id = ID();
if (id < D_NBRANCH) {
if (id < D_NBUS) {
// Diagonal Ysh entry for bus `id`. Bsh comes from the particle when the
// bus has an optimized shunt (tipo == SHC), otherwise from case data.
Bus l_bus = buses[id];
double Bsh = (l_bus.indiceEstrutura != -1 && d_estrutura[l_bus.indiceEstrutura].tipo == pso::Particula::Estrutura::SHC) ? d_enxame[l_bus.indiceEstrutura] : l_bus.Bsh ;
csrValYsh[id] = make_cuDoubleComplex(l_bus.Gsh, Bsh);
csrRowPtrYsh[id] = id;
csrColIndYsh[id] = id;
}
cuDoubleComplex Ytt;
cuDoubleComplex Yff;
cuDoubleComplex Yft;
cuDoubleComplex Ytf;
Branch l_branch = branches[id];
// stat zeroes the whole branch contribution when it is out of service.
int stat = (l_branch.inservice) ? 1 : 0;
cuDoubleComplex impedance = make_cuDoubleComplex(l_branch.R,
l_branch.X);
// Series admittance Ys = stat / (R + jX).
cuDoubleComplex Ys = cuCdiv(make_cuDoubleComplex(stat, 0), impedance);
// Line charging susceptance jB (also gated by stat).
cuDoubleComplex susceptance = make_cuDoubleComplex(0, l_branch.B);
cuDoubleComplex Bc = cuCmul(make_cuDoubleComplex(stat, 0), susceptance);
// Tap ratio: from the particle when this branch's tap is optimized, else
// from case data; tap == 0 marks "no transformer" and maps to 1.
cuDoubleComplex tap = (l_branch.tap != 0) ? ((l_branch.indiceEstrutura != -1) ? make_cuDoubleComplex(d_enxame[l_branch.indiceEstrutura], 0) : make_cuDoubleComplex(l_branch.tap, 0)) : make_cuDoubleComplex(1, 0);
// Fold the phase shift into the tap: tap *= e^{j * shift * pi/180}.
cuDoubleComplex phase_shifter = make_cuDoubleComplex(0,
M_PI / 180.0 * l_branch.shift);
tap = cuCmul(tap, cuCexp(phase_shifter));
// Two-port branch admittances:
//   Ytt = Ys + jBc/2, Yff = Ytt / (tap * conj(tap)),
//   Yft = -Ys / conj(tap), Ytf = -Ys / tap.
Ytt = cuCadd(Ys, cuCdiv(Bc, make_cuDoubleComplex(2, 0)));
Yff = cuCdiv(Ytt, cuCmul(tap, cuConj(tap)));
Yft = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), cuConj(tap));
Ytf = cuCdiv(cuCmul(Ys, make_cuDoubleComplex(-1, 0)), tap);
int offsetTo, offsetFrom;
// Row `id` of Yf holds {Yff at column from, Yft at column to}, stored in
// ascending column order (the smaller of from/to goes first).
csrRowPtrYf[id] = id * 2;
offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
offsetFrom = 1 - offsetTo;
csrColIndYf[id * 2 + offsetTo] = l_branch.to;
csrValYf[id * 2 + offsetTo] = Yft;
csrColIndYf[id * 2 + offsetFrom] = l_branch.from;
csrValYf[id * 2 + offsetFrom] = Yff;
// Row `id` of Yt holds {Ytf at column from, Ytt at column to}, same ordering.
csrRowPtrYt[id] = id * 2;
offsetTo = (l_branch.from > l_branch.to) ? 0 : 1;
offsetFrom = 1 - offsetTo;
csrColIndYt[id * 2 + offsetTo] = l_branch.to;
csrValYt[id * 2 + offsetTo] = Ytt;
csrColIndYt[id * 2 + offsetFrom] = l_branch.from;
csrValYt[id * 2 + offsetFrom] = Ytf;
// The last thread also writes the closing CSR row pointers (entry counts).
if(id == (D_NBRANCH -1)){
id++;
csrRowPtrYt[id] = id * 2;
csrRowPtrYf[id] = id * 2;
csrRowPtrYsh[D_NBUS] = D_NBUS;
}
}
}
/* Author: Igor Araújo
 * Date: 03/02/2016
 * Description: Computes the admittance matrix (Ybus) using a hybrid CPU/GPU approach with the cuSPARSE library.
 */
/* Computes Ybus = Cf * Yf + Ct * Yt + Ysh on the GPU with cuSPARSE for test
 * (particle) number nTest.
 *
 * On the first call (nTest == 0) the connection matrices Cf/Ct are built and
 * converted COO -> CSR, all matrix descriptors are created, and the result
 * buffers are sized via the *Nnz routines. Subsequent calls reuse the shared
 * sparsity patterns and only recompute the per-test value arrays (offset by
 * nTest into csrValYf/csrValYt/csrValYsh/csrValYbus).
 *
 * Parameters:
 *   nTest         - index of the current test/particle (0 triggers setup).
 *   sizeEstrutura - stride into d_enxame for this particle's variables.
 *   buses/branches - device arrays with the network case data.
 * All other state (handles, streams, csr*/coo* buffers, nnz counters,
 * descriptors) is file-scope. */
__host__ void hybrid_makeYbus(
    int nTest,
    int sizeEstrutura,
    Bus *buses,
    Branch *branches)
{
    // #1 Cf and Ct do not depend on the particle, so build them only once.
    if (nTest == 0)
    {
        // #1.1 Build Cf and Ct in coordinate (COO) format on the GPU.
        hybrid_computeCfCt<<<BLOCKS(H_NBRANCH, H_THREADS), H_THREADS, 0, stream[nTest]>>>(
            branches,
            cooValCf,
            cooRowCf,
            cooColCf,
            cooValCt,
            cooRowCt,
            cooColCt);
        // #1.2 Sort Cf by row (cusparseXcoo2csr requires row-sorted input).
        size_t before = pBufferSizeInBytes;
        checkCudaErrors(cusparseXcoosort_bufferSizeExt(sparseHandle, H_NBUS, H_NBRANCH, nnzCfcoo, cooRowCf, cooColCf, &pBufferSizeInBytes));
        if(pBufferSizeInBytes > before){
            // BUGFIX: release the previous scratch buffer before growing it,
            // otherwise the old allocation leaks (cudaFree(NULL) is a no-op).
            checkCudaErrors(cudaFree(pBuffer));
            checkCudaErrors(cudaMalloc((void**) &pBuffer , pBufferSizeInBytes * sizeof(char)));
        }
        checkCudaErrors(cusparseCreateIdentityPermutation(sparseHandle, nnzCfcoo, permutation));
        checkCudaErrors(cusparseXcoosortByRow(sparseHandle, H_NBUS, H_NBRANCH, nnzCfcoo, cooRowCf, cooColCf, permutation, pBuffer));
        // Gather the values into the sorted order recorded by the permutation.
        checkCudaErrors(cusparseZgthr(sparseHandle, nnzCfcoo, cooValCf, csrValCf, permutation, CUSPARSE_INDEX_BASE_ZERO));
        // #1.3 Convert Cf from COO to CSR.
        // NOTE(review): the sort uses nnzCfcoo while the conversion uses
        // nnzCf — confirm both counters always hold the same value.
        checkCudaErrors(cusparseXcoo2csr(sparseHandle, (const int*) cooRowCf, nnzCf, H_NBUS, csrRowPtrCf, CUSPARSE_INDEX_BASE_ZERO));
        checkCudaErrors(cudaMemcpy(csrColIndCf, cooColCf, nnzCf * sizeof(int), cudaMemcpyDeviceToDevice));
        // #1.4 Sort Ct by row.
        before = pBufferSizeInBytes;
        checkCudaErrors(cusparseXcoosort_bufferSizeExt(sparseHandle, H_NBUS, H_NBRANCH, nnzCtcoo, cooRowCt, cooColCt, &pBufferSizeInBytes));
        if(pBufferSizeInBytes > before){
            // BUGFIX: same leak fix as above.
            checkCudaErrors(cudaFree(pBuffer));
            checkCudaErrors(cudaMalloc((void**) &pBuffer , pBufferSizeInBytes * sizeof(char)));
        }
        checkCudaErrors(cusparseCreateIdentityPermutation(sparseHandle, nnzCtcoo, permutation));
        checkCudaErrors(cusparseXcoosortByRow(sparseHandle, H_NBUS, H_NBRANCH, nnzCtcoo, cooRowCt, cooColCt, permutation, pBuffer));
        checkCudaErrors(cusparseZgthr(sparseHandle, nnzCtcoo, cooValCt, csrValCt, permutation, CUSPARSE_INDEX_BASE_ZERO));
        // #1.5 Convert Ct from COO to CSR.
        checkCudaErrors(cusparseXcoo2csr(sparseHandle, (const int*) cooRowCt, nnzCt, H_NBUS, csrRowPtrCt, CUSPARSE_INDEX_BASE_ZERO));
        checkCudaErrors(cudaMemcpy(csrColIndCt, cooColCt, nnzCt * sizeof(int), cudaMemcpyDeviceToDevice));
    }
    // #2 Build Yf, Yt and Ysh for this particle. The sparsity patterns are
    //    shared across tests; only the value arrays are per-test (offsets).
    hybrid_computeYfYt<<<BLOCKS(H_NBRANCH, H_THREADS), H_THREADS, 0, stream[nTest]>>>(
        buses,
        branches,
        csrValYt + nnzYt * nTest,
        csrRowPtrYt,
        csrColIndYt,
        csrValYf + nnzYf * nTest,
        csrRowPtrYf,
        csrColIndYf,
        csrValYsh + nnzYsh * nTest,
        csrRowPtrYsh,
        csrColIndYsh,
        d_estrutura,
        d_enxame + nTest * sizeEstrutura);
    // #3 Ybus = Cf * Yf + Ct * Yt + Ysh
    // #3.1 CfYf = Cf * Yf. First call: create descriptors, size the result
    //      (csrgemmNnz), allocate, multiply. Later calls: multiply only.
    if(nTest == 0)
    {
        checkCudaErrors(cusparseCreateMatDescr(&descrCf));
        checkCudaErrors(cusparseCreateMatDescr(&descrYf));
        checkCudaErrors(cusparseCreateMatDescr(&descrCfYf));
        checkCudaErrors(cusparseSetMatType(descrCf, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrYf, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrCfYf, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgemmNnz(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, H_NBUS, H_NBUS, H_NBRANCH, descrCf, nnzCf, csrRowPtrCf, csrColIndCf, descrYf, nnzYf, csrRowPtrYf, csrColIndYf, descrCfYf, csrRowPtrCfYf, &nnzCfYf));
        checkCudaErrors(cudaMalloc((void**)&csrColIndCfYf , sizeof(int) * nnzCfYf));
        checkCudaErrors(cudaMalloc((void**)&csrValCfYf , sizeof(cuDoubleComplex) * nnzCfYf));
        checkCudaErrors(cusparseZcsrgemm(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, H_NBUS, H_NBUS, H_NBRANCH, descrCf, nnzCf, csrValCf, csrRowPtrCf, csrColIndCf, descrYf, nnzYf, csrValYf, csrRowPtrYf, csrColIndYf, descrCfYf, csrValCfYf, csrRowPtrCfYf, csrColIndCfYf));
    }
    else
    {
        // BUGFIX: the B operand descriptor is descrYf (was descrCfYf). Both
        // are general matrices, so results were unaffected, but descriptors
        // must match the matrices they describe.
        checkCudaErrors(cusparseZcsrgemm(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, H_NBUS, H_NBUS, H_NBRANCH, descrCf, nnzCf, csrValCf, csrRowPtrCf, csrColIndCf, descrYf, nnzYf, csrValYf + nnzYf * nTest, csrRowPtrYf, csrColIndYf, descrCfYf, csrValCfYf, csrRowPtrCfYf, csrColIndCfYf));
    }
    // #3.2 CtYt = Ct * Yt (same first-call setup pattern as #3.1).
    if(nTest == 0)
    {
        checkCudaErrors(cusparseCreateMatDescr(&descrCt));
        checkCudaErrors(cusparseCreateMatDescr(&descrYt));
        checkCudaErrors(cusparseCreateMatDescr(&descrCtYt));
        checkCudaErrors(cusparseSetMatType(descrCt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrYt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrCtYt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgemmNnz(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, H_NBUS, H_NBUS, H_NBRANCH, descrCt, nnzCt, csrRowPtrCt, csrColIndCt, descrYt, nnzYt, csrRowPtrYt, csrColIndYt, descrCtYt, csrRowPtrCtYt, &nnzCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrColIndCtYt , sizeof(int) * nnzCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrValCtYt , sizeof(cuDoubleComplex) * nnzCtYt));
        checkCudaErrors(cusparseZcsrgemm(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, H_NBUS, H_NBUS, H_NBRANCH, descrCt, nnzCt, csrValCt, csrRowPtrCt, csrColIndCt, descrYt, nnzYt, csrValYt, csrRowPtrYt, csrColIndYt, descrCtYt, csrValCtYt, csrRowPtrCtYt, csrColIndCtYt));
    }
    else
    {
        // BUGFIX: B operand descriptor is descrYt (was descrCtYt) — see #3.1.
        checkCudaErrors(cusparseZcsrgemm(sparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, H_NBUS, H_NBUS, H_NBRANCH, descrCt, nnzCt, csrValCt, csrRowPtrCt, csrColIndCt, descrYt, nnzYt, csrValYt + nnzYt * nTest, csrRowPtrYt, csrColIndYt, descrCtYt, csrValCtYt, csrRowPtrCtYt, csrColIndCtYt));
    }
    // #3.3 CfYfCtYt = CfYf + CtYt (geam with both scale factors = 1).
    if(nTest == 0)
    {
        checkCudaErrors(cusparseCreateMatDescr(&descrCfYfCtYt));
        checkCudaErrors(cusparseSetMatType(descrCfYfCtYt, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgeamNnz(sparseHandle, H_NBUS, H_NBUS, descrCfYf, nnzCfYf, csrRowPtrCfYf, csrColIndCfYf, descrCtYt, nnzCtYt, csrRowPtrCtYt, csrColIndCtYt, descrCfYfCtYt, csrRowPtrCfYfCtYt, &nnzCfYfCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrColIndCfYfCtYt , sizeof(int) * nnzCfYfCtYt));
        checkCudaErrors(cudaMalloc((void**)&csrValCfYfCtYt , sizeof(cuDoubleComplex) * nnzCfYfCtYt));
        cuDoubleComplex fator = make_cuDoubleComplex(1,0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS, &fator, descrCfYf, nnzCfYf, (const cuDoubleComplex*)csrValCfYf, csrRowPtrCfYf, csrColIndCfYf, &fator, descrCtYt, nnzCtYt,(const cuDoubleComplex*) csrValCtYt, csrRowPtrCtYt, csrColIndCtYt, descrCfYfCtYt, csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt));
    }
    else
    {
        cuDoubleComplex fator = make_cuDoubleComplex(1,0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS, &fator, descrCfYf, nnzCfYf, (const cuDoubleComplex*)csrValCfYf, csrRowPtrCfYf, csrColIndCfYf, &fator, descrCtYt, nnzCtYt,(const cuDoubleComplex*) csrValCtYt, csrRowPtrCtYt, csrColIndCtYt, descrCfYfCtYt, csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt));
    }
    // #3.4 Ybus = CfYfCtYt + Ysh. Column indices/row pointers are shared by
    //      all tests; only csrValYbus holds one value array per test, hence
    //      the H_NTESTS factor in its allocation.
    if(nTest == 0)
    {
        checkCudaErrors(cusparseCreateMatDescr(&descrYsh));
        checkCudaErrors(cusparseCreateMatDescr(&descrYbus));
        checkCudaErrors(cusparseSetMatType(descrYsh, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseSetMatType(descrYbus, CUSPARSE_MATRIX_TYPE_GENERAL));
        checkCudaErrors(cusparseXcsrgeamNnz(sparseHandle, H_NBUS, H_NBUS, descrCfYfCtYt, nnzCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt, descrYsh, nnzYsh, csrRowPtrYsh, csrColIndYsh, descrYbus, csrRowPtrYbus, &nnzYbus));
        checkCudaErrors(cudaMalloc((void**)&csrColIndYbus , sizeof(int) * nnzYbus ));
        checkCudaErrors(cudaMalloc((void**)&csrValYbus , sizeof(cuDoubleComplex) * nnzYbus * H_NTESTS));
        cuDoubleComplex fator = make_cuDoubleComplex(1,0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS, &fator, descrCfYfCtYt, nnzCfYfCtYt, (const cuDoubleComplex*)csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt, &fator, descrYsh, nnzYsh,(const cuDoubleComplex*) csrValYsh, csrRowPtrYsh, csrColIndYsh, descrYbus, csrValYbus, csrRowPtrYbus, csrColIndYbus));
    }
    else
    {
        cuDoubleComplex fator = make_cuDoubleComplex(1,0);
        checkCudaErrors(cusparseZcsrgeam(sparseHandle, H_NBUS, H_NBUS, &fator, descrCfYfCtYt, nnzCfYfCtYt, (const cuDoubleComplex*)csrValCfYfCtYt, csrRowPtrCfYfCtYt, csrColIndCfYfCtYt, &fator, descrYsh, nnzYsh,(const cuDoubleComplex*) (csrValYsh + nnzYsh * nTest), csrRowPtrYsh, csrColIndYsh, descrYbus, csrValYbus + nnzYbus * nTest, csrRowPtrYbus, csrColIndYbus));
    }
}
#endif /* MAKEYBUS_CUH_ */
|
sort.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <math.h>
#include <time.h>
#include <ParTI.h>
#include "sptensor.h"
#include <limits.h>
#include <numa.h>
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r);
static void spt_QuickSortIndexCmode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, int cmode_start, int num_cmode);
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sk_bits);
static void spt_QuickSortIndexMorton2D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sb_bits, sptIndex * mode_order);
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sb_bits);
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sb_bits);
static void spt_QuickSortIndexSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex mode);
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order, sptIndex * eleinds_buf);
static void spt_QuickSortIndexExceptSingleModeRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order, sptElementIndex sk_bits);
static const uint32_t MASKS[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF};
static const uint32_t SHIFTS[] = {1, 2, 4, 8};
/* Mode order: X -> Y -> Z, x indices are sorted, y and z are Morton order sorted. */
static const uint32_t morton256_z[256] =
{
0x00000000,
0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200,
0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000,
0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200,
0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000,
0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200,
0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000,
0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200,
0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000,
0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200,
0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000,
0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200,
0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000,
0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200,
0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000,
0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200,
0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000,
0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200,
0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000,
0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200,
0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000,
0x00208001, 0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200,
0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000,
0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200,
0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000,
0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200,
0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000,
0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200,
0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000,
0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200,
0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000,
0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200,
0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249
};
// pre-shifted table for Y coordinates (1 bit to the left)
static const uint32_t morton256_y[256] = {
0x00000000,
0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400,
0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000,
0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400,
0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000,
0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400,
0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000,
0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400,
0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 0x00080000,
0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400,
0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000,
0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400,
0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000,
0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400,
0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000,
0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400,
0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000,
0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400,
0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000,
0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400,
0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000,
0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400,
0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000,
0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400,
0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000,
0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400,
0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000,
0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400,
0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000,
0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 0x00490090, 0x00490092, 0x00490400,
0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000,
0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400,
0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492
};
// Pre-shifted table for x (2 bits to the left)
static const uint32_t morton256_x[256] = {
0x00000000,
0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800,
0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000,
0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800,
0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000,
0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800,
0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000,
0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800,
0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000,
0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800,
0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000,
0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800,
0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000,
0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800,
0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000,
0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800,
0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000,
0x00800004, 0x00800020, 0x00800024, 0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800,
0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000,
0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800,
0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000,
0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800,
0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000,
0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800,
0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000,
0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800,
0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000,
0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800,
0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000,
0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800,
0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000,
0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800,
0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924
};
/* Exchange two nonzero entries of a COO sparse tensor: swaps their index
 * tuples in every mode and their stored values. */
void spt_SwapValues(sptSparseTensor *tsr, sptNnzIndex ind1, sptNnzIndex ind2) {
    /* Swap the index tuples, one mode at a time. */
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        sptIndex *inds = tsr->inds[m].data;
        sptIndex tmp = inds[ind1];
        inds[ind1] = inds[ind2];
        inds[ind2] = tmp;
    }
    /* Swap the corresponding nonzero values. */
    sptValue tmpval = tsr->values.data[ind1];
    tsr->values.data[ind1] = tsr->values.data[ind2];
    tsr->values.data[ind2] = tmpval;
}
/****************************
* Functions to determine mode order
****************************/
/**
* Determine the best mode order. Sort order: [mode, (ordered by increasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
/**
 * Determine the best mode order: [mode, then the remaining modes ordered by
 * increasing dimension size].
 *
 * @param[out] mode_order the array to fill (length nmodes)
 * @param[in]  mode       the product mode, placed first
 * @param[in]  ndims      tensor dimension sizes
 * @param[in]  nmodes     tensor order
 */
void sptGetBestModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    /* Pair each mode with its dimension size and sort ascending by size. */
    sptKeyValuePair * pairs = (sptKeyValuePair*)malloc(nmodes * sizeof(*pairs));
    for(sptIndex m = 0; m < nmodes; ++m) {
        pairs[m].key = m;
        pairs[m].value = ndims[m];
    }
    sptPairArraySort(pairs, nmodes);
    for(sptIndex m = 0; m < nmodes; ++m) {
        mode_order[m] = pairs[m].key;
    }
    free(pairs);
    /* Locate `mode` in the sorted order. */
    sptIndex loc = 0;
    for(sptIndex m = 0; m < nmodes; ++m) {
        if(mode_order[m] == mode) {
            loc = m;
        }
    }
    /* Rotate `mode` to the front, keeping the relative order of the rest. */
    if(loc != 0) {
        while(loc > 0) {
            mode_order[loc] = mode_order[loc - 1];
            --loc;
        }
        mode_order[0] = mode;
    }
}
/**
* Determine the worst mode order. Sort order: [(ordered by decreasing dimension sizes)]
*
* @param[out] mode_order a pointer to the array to be filled,
* @param[in] mode mode to do product
* @param[in] ndims tensor dimension sizes
* @param[in] nmodes tensor order
*
*/
void sptGetWorstModeOrder(
    sptIndex * mode_order,
    sptIndex const mode,
    sptIndex const * ndims,
    sptIndex const nmodes)
{
    /* Pair each mode id with its dimension size. */
    sptKeyValuePair * sorted_ndims = (sptKeyValuePair*)malloc(nmodes * sizeof(*sorted_ndims));
    for(sptIndex m=0; m<nmodes; ++m) {
        sorted_ndims[m].key = m;
        sorted_ndims[m].value = ndims[m];
    }
    /* Sort increasingly, then read the keys backwards to obtain decreasing
       dimension sizes. */
    sptPairArraySort(sorted_ndims, nmodes);
    for(sptIndex m=0; m<nmodes; ++m) {
        mode_order[m] = sorted_ndims[nmodes - 1 - m].key;
    }
    /* Find the location of `mode` in the sorted order. */
    sptIndex mode_loc = 0;
    for(sptIndex m=0; m<nmodes; ++m) {
        if(mode_order[m] == mode) {
            mode_loc = m;
        }
    }
    /* Shift `mode` to mode_order[nmodes-1].
       FIX: the shift loop previously ran while m < nmodes, so at
       m == nmodes-1 it read mode_order[m+1] — one element past the end of
       the array. It must stop at nmodes-2; the final slot is written with
       `mode` below, as before. */
    if(mode_loc != nmodes - 1) {
        for(sptIndex m = mode_loc; m < nmodes - 1; ++m) {
            mode_order[m] = mode_order[m+1];
        }
        mode_order[nmodes - 1] = mode;
    }
    free(sorted_ndims);
}
/**
* Sort COO sparse tensor by Z-Morton order. (The same with "sptPreprocessSparseTensor" function in "convert.c" without setting kschr.)
* Kernels in Row-major order, blocks and elements are in Z-Morton order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
int sptSparseTensorMixedOrder(
    sptSparseTensor *tsr,
    const sptElementIndex sb_bits,
    const sptElementIndex sk_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    int result;

    /* Sort tsr in a Row-major Block order to get all kernels. Not use
       Morton-order for kernels: better support for higher-order tensors by
       limiting kernel size, because Morton key bit <= 128. */
    sptSparseTensorSortIndexRowBlock(tsr, 1, 0, nnz, sk_bits, tk);

    /* Compute kernel begin pointers and per-kernel nonzero counts. */
    sptNnzIndexVector kptr, knnzs;
    result = sptNewNnzIndexVector(&kptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    result = sptSetKernelPointers(&kptr, &knnzs, tsr, sk_bits);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);

    /* Sort the blocks inside each kernel in Z-Morton order.
       FIX: loop condition written as k+1 < kptr.len so an empty pointer
       vector cannot make the unsigned expression kptr.len - 1 underflow. */
    for(sptNnzIndex k = 0; k + 1 < kptr.len; ++k) {
        sptNnzIndex k_begin = kptr.data[k];
        sptNnzIndex k_end = kptr.data[k+1]; // exclusive
        sptSparseTensorSortIndexMorton(tsr, 1, k_begin, k_end, sb_bits, tk);
    }

    /* FIX: the kernel-pointer vectors were leaked before. */
    sptFreeNnzIndexVector(&kptr);
    sptFreeNnzIndexVector(&knnzs);
    return 0;
}
/**
* Sort COO sparse tensor by plain blocked order for modes except mode-n. Blocks are in Row-major order.
* @param tsr a pointer to a sparse tensor
* @return mode pointers
*/
int sptSparseTensorSortPartialIndex(
    sptSparseTensor *tsr,
    sptIndex const * mode_order,
    const sptElementIndex sb_bits,
    int const tk)
{
    sptNnzIndex nnz = tsr->nnz;
    sptIndex const mode = mode_order[0];
    int result;

    /* Sort all nonzeros by the custom mode permutation first. */
    sptSparseTensorSortIndexCustomOrder(tsr, mode_order, 1, tk);

    /* FIX: guard against an empty tensor before reading inds[mode].data[0]. */
    if(nnz == 0) {
        return 0;
    }

    /* Build slice pointers: sptr.data[s] is the first nonzero of the s-th
       non-empty slice along `mode`; a final entry holds nnz. */
    sptNnzIndexVector sptr;
    result = sptNewNnzIndexVector(&sptr, 0, 0);
    spt_CheckError(result, "HiSpTns New", NULL);
    sptIndex pre_idx = tsr->inds[mode].data[0];
    result = sptAppendNnzIndexVector(&sptr, 0);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);
    for (sptNnzIndex z = 0; z < nnz; ++z) {
        if (tsr->inds[mode].data[z] > pre_idx) {
            /* z is the first nonzero of a new slice. */
            result = sptAppendNnzIndexVector(&sptr, z);
            spt_CheckError(result, "HiSpTns Preprocess", NULL);
            pre_idx = tsr->inds[mode].data[z];
        }
    }
    result = sptAppendNnzIndexVector(&sptr, nnz);
    spt_CheckError(result, "HiSpTns Preprocess", NULL);

    /* Sort each slice in plain row-major block order.
       FIX: iterate over the slice boundaries actually collected
       (sptr.len - 1) instead of ndims[mode]; empty slices produce fewer
       boundaries than dimensions, and the old loop read past the end of
       sptr.data. Also removed a leftover debug dump to stdout. */
    for(sptNnzIndex s = 0; s + 1 < sptr.len; ++s) {
        sptNnzIndex s_begin = sptr.data[s];
        sptNnzIndex s_end = sptr.data[s+1]; // exclusive
        sptSparseTensorSortIndexRowBlock(tsr, 1, s_begin, s_end, sb_bits, tk);
    }

    /* FIX: the slice-pointer vector was leaked before. */
    sptFreeNnzIndexVector(&sptr);
    return 0;
}
/**
* Randomly shuffle all nonzeros.
*
* @param[in] tsr tensor to be shuffled
*
*/
/* Randomly shuffle all nonzeros of tsr in place.
   FIX: the previous version called srand() on every iteration, which
   restarts the PRNG stream each time and degrades the shuffle to a
   near-deterministic pattern; the forward swap-with-anywhere loop is also
   a biased shuffle. Seed once and use the unbiased Fisher-Yates walk. */
void sptGetRandomShuffleElements(sptSparseTensor *tsr) {
    sptNnzIndex const nnz = tsr->nnz;
    if(nnz < 2) {
        return; /* nothing to shuffle */
    }
    srand((unsigned) time(NULL));
    /* Fisher-Yates: element z swaps with a uniform position in [0, z]. */
    for(sptNnzIndex z = nnz - 1; z > 0; --z) {
        sptNnzIndex new_loc = (sptNnzIndex) rand() % (z + 1);
        if(new_loc != z) {
            spt_SwapValues(tsr, z, new_loc);
        }
    }
}
/**
* Randomly shuffle all indices.
*
* @param[in] tsr tensor to be shuffled
* @param[out] map_inds records the randomly generated mapping
*
*/
/* Randomly permute the index mapping map_inds[m] for every mode m of tsr
   (Fisher-Yates per mode).
   FIX: the previous version reseeded with srand(m+i+1+time(NULL)) inside
   the inner loop; time() barely changes between iterations, so successive
   rand() draws were heavily correlated. Seed once up front instead.
   Also compute dim_len - 1 in signed arithmetic so a zero-sized dimension
   cannot underflow to a huge unsigned loop bound. */
void sptGetRandomShuffledIndices(sptSparseTensor *tsr, sptIndex ** map_inds)
{
    srand((unsigned) time(NULL));
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        long int dim_len = (long int) tsr->ndims[m];
        for(long int i = dim_len - 1; i > 0; --i) {
            long int new_loc = rand() % (i + 1);
            /* Swap i <-> new_loc */
            sptIndex tmp = map_inds[m][i];
            map_inds[m][i] = map_inds[m][new_loc];
            map_inds[m][new_loc] = tmp;
        }
    }
}
/****************************
* Sorting functions
****************************/
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by Morton-order.
* @param hitsr the sparse tensor to operate on
*/
/* Sort nonzeros [begin, end) of tsr by Z-Morton block order (block size
   2^sb_bits), using tk OpenMP threads. Skipped when the tensor is already
   recorded as sorted in natural mode order and force is 0. */
void sptSparseTensorSortIndexMorton(
sptSparseTensor *tsr,
int force,
sptNnzIndex begin,
sptNnzIndex end,
sptElementIndex sb_bits,
int tk)
{
size_t m;
int needsort = 0;
/* Reset the recorded sort order to the natural mode order; any deviation
   means the nonzeros must be resorted. */
for(m = 0; m < tsr->nmodes; ++m) {
if(tsr->sortorder[m] != m) {
tsr->sortorder[m] = m;
needsort = 1;
}
}
if(needsort || force) {
/* One thread seeds the recursive quicksort; the OpenMP tasks it spawns
   are executed by the whole team. */
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
/* TODO: add support for other order tensors */
switch(tsr->nmodes) {
case 3:
spt_QuickSortIndexMorton3D(tsr, begin, end, sb_bits);
break;
case 4:
spt_QuickSortIndexMorton4D(tsr, begin, end, sb_bits);
break;
default:
printf("No support for more than 4th-order tensors yet.\n");
}
}
}
}
}
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by row major order.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexExceptSingleModeRowBlock(
    sptSparseTensor *tsr,
    int force,
    sptNnzIndex begin,
    sptNnzIndex end,
    sptIndex * const mode_order,
    sptElementIndex sk_bits,
    int tk)
{
    /* Reset the recorded sort order to natural; a resort is needed if it
       was anything else. */
    int stale = 0;
    for(size_t m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            stale = 1;
        }
    }
    if(!stale && !force) {
        return;
    }
    /* One thread seeds the recursive quicksort; its OpenMP tasks are
       picked up by the rest of the team. */
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        {
            spt_QuickSortIndexExceptSingleModeRowBlock(tsr, begin, end, mode_order, sk_bits);
        }
    }
}
/**
* Reorder the elements in a COO sparse tensor lexicographically, sorting by row major order.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexRowBlock(
    sptSparseTensor *tsr,
    int force,
    sptNnzIndex begin,
    sptNnzIndex end,
    sptElementIndex sk_bits,
    int tk)
{
    /* Normalize the recorded sort order, noting whether it changed. */
    int dirty = 0;
    for(size_t m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            dirty = 1;
        }
    }
    if(!dirty && !force) {
        return;
    }
    /* Sort nonzeros [begin, end) by kernel-block id (index >> sk_bits),
       row-major across modes, via task-parallel quicksort. */
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        {
            spt_QuickSortIndexRowBlock(tsr, begin, end, sk_bits);
        }
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, sorting only one mode.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexSingleMode(sptSparseTensor *tsr, int force, sptIndex mode, int tk)
{
    /* Start from the caller's force flag; reset the recorded sort order to
       natural and mark a resort if it differed. */
    int must_sort = force ? 1 : 0;
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            must_sort = 1;
        }
    }
    if(!must_sort) {
        return;
    }
    /* Task-parallel quicksort keyed on the single mode `mode`. */
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        {
            spt_QuickSortIndexSingleMode(tsr, 0, tsr->nnz, mode);
        }
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, sorting all modes except one. The except mode is NOT ordered.
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexExceptSingleMode(sptSparseTensor *tsr, int force, sptIndex * mode_order, int tk) {
    sptIndex * eleinds_buf = NULL;  /* no preallocated scratch buffer */
    /* Reset the recorded sort order to natural, tracking any change. */
    int needsort = 0;
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] == m) {
            continue;
        }
        tsr->sortorder[m] = m;
        needsort = 1;
    }
    if(force || needsort) {
        /* Quicksort keyed on all modes except the last entry of
           mode_order, parallelized with OpenMP tasks. */
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                spt_QuickSortIndexExceptSingleMode(tsr, 0, tsr->nnz, mode_order, eleinds_buf);
            }
        }
    }
}
/**
* Reorder the elements in a sparse tensor lexicographically, sorting all modes except one. The except mode is NOT ordered.
* @param tsr the sparse tensor to operate on
*/
/* Sort all nonzeros by Z-Morton order over the non-excluded modes of
   mode_order, with block size 2^sb_bits, using tk OpenMP threads. */
void sptSparseTensorSortIndexExceptSingleModeMorton(sptSparseTensor *tsr, int force, sptIndex * mode_order, sptElementIndex sb_bits, int tk) {
sptIndex m;
int needsort = 0;
/* Reset the recorded sort order to natural; any deviation forces a resort. */
for(m = 0; m < tsr->nmodes; ++m) {
if(tsr->sortorder[m] != m) {
tsr->sortorder[m] = m;
needsort = 1;
}
}
if(needsort || force) {
/* One thread seeds the recursive quicksort; its tasks run on the team. */
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
switch(tsr->nmodes) {
case 3:
/* 3-D tensor: the two non-excluded modes form a 2-D Morton key. */
spt_QuickSortIndexMorton2D(tsr, 0, tsr->nnz, sb_bits, mode_order);
break;
case 4:
/* NOTE(review): the 4-D sort call is commented out, so 4-D tensors
   silently pass through unsorted — confirm whether this is intended. */
// spt_QuickSortIndexMorton3D(tsr, 0, tsr->nnz, sb_bits, mode_order);
break;
default:
printf("No support for more than 4th-order tensors yet.\n");
}
}
}
}
}
/**
* Reorder the elements in a sparse tensor lexicographically in a customized order.
* @param tsr the sparse tensor to operate on
*/
/* Sort the nonzeros lexicographically with mode priority given by
   mode_order (mode_order[0] is the most significant key). */
void sptSparseTensorSortIndexCustomOrder(sptSparseTensor *tsr, sptIndex const * mode_order, int force, int tk)
{
sptIndex nmodes = tsr->nmodes;
sptIndex m;
sptSparseTensor tsr_temp; // Only copy pointers, not real data.
/* Already sorted in the requested order and not forced: nothing to do. */
if(!force && memcmp(tsr->sortorder, mode_order, nmodes * sizeof (sptIndex)) == 0) {
return;
}
/* Build a shallow, mode-permuted view of tsr: inds[m] and values alias the
   original storage, so sorting the view physically reorders tsr itself. */
tsr_temp.nmodes = nmodes;
tsr_temp.sortorder = tsr->sortorder;
tsr_temp.ndims = malloc(nmodes * sizeof tsr_temp.ndims[0]);
tsr_temp.nnz = tsr->nnz;
tsr_temp.inds = malloc(nmodes * sizeof tsr_temp.inds[0]);
tsr_temp.values = tsr->values;
for(m = 0; m < nmodes; ++m) {
tsr_temp.ndims[m] = tsr->ndims[mode_order[m]];
tsr_temp.inds[m] = tsr->inds[mode_order[m]];
}
sptSparseTensorSortIndex(&tsr_temp, 1, tk);
/* Free only the view's pointer arrays; the data still belongs to tsr. */
free(tsr_temp.inds);
free(tsr_temp.ndims);
/* Record the order the tensor is now sorted in. */
for(m = 0; m < nmodes; ++m) {
tsr->sortorder[m] = mode_order[m];
}
}
/**
* Reorder the elements in a sparse tensor lexicographically
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndex(sptSparseTensor *tsr, int force, int tk)
{
    /* Normalize the recorded sort order; remember whether it was stale. */
    int stale = 0;
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] != m) {
            tsr->sortorder[m] = m;
            stale = 1;
        }
    }
    if(stale || force) {
        /* A single thread seeds the recursive quicksort; the tasks it
           creates are executed by the whole OpenMP team. */
        #pragma omp parallel num_threads(tk)
        {
            #pragma omp single nowait
            {
                spt_QuickSortIndex(tsr, 0, tsr->nnz);
            }
        }
    }
}
/**
* Reorder the elements in a sparse tensor starting from cmode_start with num_cmode
* @param tsr the sparse tensor to operate on
*/
void sptSparseTensorSortIndexCmode(sptSparseTensor *tsr, int force, int tk, int cmode_start, int num_cmode)
{
    /* Reset the recorded sort order; a resort is required if it differed
       from the natural order or the caller forces one. */
    int must_sort = force ? 1 : 0;
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        if(tsr->sortorder[m] == m) {
            continue;
        }
        tsr->sortorder[m] = m;
        must_sort = 1;
    }
    if(!must_sort) {
        return;
    }
    /* Quicksort keyed on the contracted-mode range first. */
    #pragma omp parallel num_threads(tk)
    {
        #pragma omp single nowait
        {
            spt_QuickSortIndexCmode(tsr, 0, tsr->nnz, cmode_start, num_cmode);
        }
    }
}
/****************************
* Comparison functions
****************************/
/**
* compare two indices from two identical or distinct sparse tensors lexicographically
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
int spt_SparseTensorCompareIndices(sptSparseTensor * const tsr1, sptNnzIndex loc1, sptSparseTensor * const tsr2, sptNnzIndex loc2)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Lexicographic scan: the first mode whose indices differ decides. */
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        sptIndex a = tsr1->inds[m].data[loc1];
        sptIndex b = tsr2->inds[m].data[loc2];
        if(a != b) {
            return (a < b) ? -1 : 1;
        }
    }
    return 0;
}
int spt_SparseTensorCompareIndicesCmode(sptSparseTensor * const tsr1, sptNnzIndex loc1, sptSparseTensor * const tsr2, sptNnzIndex loc2, int cmode_start, int num_cmode)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Two passes: the contracted modes [cmode_start, cmode_start+num_cmode)
       take priority, followed by the modes before cmode_start. */
    for(int pass = 0; pass < 2; ++pass) {
        int lo = (pass == 0) ? cmode_start : 0;
        int hi = (pass == 0) ? cmode_start + num_cmode : cmode_start;
        for(int m = lo; m < hi; ++m) {
            sptIndex a = tsr1->inds[m].data[loc1];
            sptIndex b = tsr2->inds[m].data[loc2];
            if(a != b) {
                return (a < b) ? -1 : 1;
            }
        }
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically in all modes except mode
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @param mode the mode to be excluded in comparison
* @return -1 for less, 0 for equal, 1 for greater
*/
int spt_SparseTensorCompareIndicesExceptSingleMode(sptSparseTensor * const tsr1, sptNnzIndex loc1, sptSparseTensor * const tsr2, sptNnzIndex loc2, sptIndex * const mode_order)
{
    /* Lexicographic comparison over the first nmodes-1 entries of
       mode_order; the excluded mode (mode_order[nmodes-1]) is never
       examined.
       FIX: removed a ~60-line `#if 0` block that duplicated this loop with
       hand-unrolled 3-D/4-D special cases (dead code). */
    sptIndex i, m;
    sptIndex eleind1, eleind2;
    assert(tsr1->nmodes == tsr2->nmodes);
    for(i = 0; i < tsr1->nmodes - 1; ++ i) {
        m = mode_order[i];
        eleind1 = tsr1->inds[m].data[loc1];
        eleind2 = tsr2->inds[m].data[loc2];
        if(eleind1 < eleind2) {
            return -1;
        } else if(eleind1 > eleind2) {
            return 1;
        }
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically in the num_ncmodes modes under the specified mode_order.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @param mode the mode to be excluded in comparison
* @return -1 for less, 0 for equal, 1 for greater
*/
int spt_SparseTensorCompareIndicesCustomize(sptSparseTensor * const tsr1, sptNnzIndex loc1, sptIndex * const mode_order_1, sptSparseTensor * const tsr2, sptNnzIndex loc2, sptIndex * const mode_order_2, sptIndex num_ncmodes)
{
    /* Lexicographic comparison over the first num_ncmodes modes, each
       tensor addressed through its own mode permutation. */
    for(sptIndex i = 0; i < num_ncmodes; ++i) {
        sptIndex a = tsr1->inds[mode_order_1[i]].data[loc1];
        sptIndex b = tsr2->inds[mode_order_2[i]].data[loc2];
        if(a != b) {
            return (a < b) ? -1 : 1;
        }
    }
    return 0;
}
/* Compare two nonzeros by mapping the (nmodes-1) non-excluded indices of
   each through a generalized Cantor pairing polynomial into one double,
   then comparing the two scalars.
   NOTE(review): the pairing value is computed in double arithmetic; for
   large index sums precision loss could make distinct tuples compare
   equal — confirm the intended index ranges. */
int spt_SparseTensorCompareIndicesExceptSingleModeCantor(sptSparseTensor * const tsr1, sptNnzIndex loc1, sptSparseTensor * const tsr2, sptNnzIndex loc2, sptIndex * const mode_order)
{
sptIndex i, m;
sptIndex eleind1, eleind2;
double val1, val2;
double prods, presum;
/* invfactorials[k] = 1/k! ; entry 0 is a placeholder and never read. */
double invfactorials[7] = {0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0, 1.0/720.0}; /*we memorize factorials*/
assert(tsr1->nmodes == tsr2->nmodes);
assert(tsr1->nmodes <= 6); /*just so that we memorize only 6 factorials*/
// printf("loc1: %lu, loc2: %lu\n", loc1, loc2);
/* Accumulate the Cantor value for the element at loc1: for each prefix,
   add binom(presum + i, i+1) = presum*(presum+1)*...*(presum+i)/(i+1)!. */
val1 = presum = 0.0;
for(i = 0; i < tsr1->nmodes - 1; ++ i) {
m = mode_order[i];
eleind1 = tsr1->inds[m].data[loc1];
// printf("mode %u: eleind1: %u\n", m, eleind1);
presum = presum + eleind1;
prods = presum;
for (sptIndex jj = 1; jj < i+1; jj ++)
prods = prods * (presum + jj);
// printf("val1: presum: %lf, prods: %lf \n", presum, prods);
val1 += invfactorials[i+1] * prods;
}
// printf("val1: %lf \n", val1);
/* Same accumulation for the element at loc2. */
val2 = presum = 0.0;
for(i = 0; i < tsr2->nmodes - 1; ++ i) {
m = mode_order[i];
eleind2 = tsr2->inds[m].data[loc2];
// printf("mode %u: eleind2: %u\n", m, eleind2);
presum = presum + eleind2;
prods = presum;
for (sptIndex jj=1; jj < i+1; jj ++)
prods = prods * (presum+jj);
// printf("val2: presum: %lf, prods: %lf \n", presum, prods);
val2 += invfactorials[i+1] * prods;
}
// printf("val2: %lf \n\n", val2);
/* Order by the scalar Cantor values. */
if(val1 < val2)
return -1;
else if (val1 > val2)
return 1;
else
return 0;
}
/**
 * Check whether the element at position loc of a sparse tensor lies inside
 * the half-open index box [inds1, inds2).
 * @param tsr the sparse tensor to examine
 * @param loc the position of the element whose indices are tested
 * @param inds1 the inclusive lower bounds, one entry per mode
 * @param inds2 the exclusive upper bounds, one entry per mode
 * @return 1 if within the range in every mode; otherwise return -1.
 */
int spt_SparseTensorCompareIndicesRange(sptSparseTensor * const tsr, sptNnzIndex loc, sptIndex * const inds1, sptIndex * const inds2)
{
    /* Return 1 iff element loc lies in the half-open box
       [inds1[m], inds2[m]) for every mode m; otherwise -1. */
    for(sptIndex m = 0; m < tsr->nmodes; ++m) {
        sptIndex idx = tsr->inds[m].data[loc];
        if(idx < inds1[m]) {
            return -1;
        }
        if(idx >= inds2[m]) {
            return -1;
        }
    }
    return 1;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords. Compare all modes except one. Also inter- and intra- blocks are both sorted.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
int spt_SparseTensorCompareIndicesExceptSingleModeRowBlock(
    sptSparseTensor * const tsr1,
    sptNnzIndex loc1,
    sptSparseTensor * const tsr2,
    sptNnzIndex loc2,
    sptIndex * const mode_order,
    sptElementIndex sk_bits)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    /* For each non-excluded mode (the last entry of mode_order is skipped):
       compare block ids (index >> sk_bits) first; within equal blocks,
       compare the raw indices so elements inside a block stay sorted too. */
    for(sptIndex i = 0; i < tsr1->nmodes - 1; ++i) {
        sptIndex m = mode_order[i];
        sptIndex a = tsr1->inds[m].data[loc1];
        sptIndex b = tsr2->inds[m].data[loc2];
        sptIndex blk_a = a >> sk_bits;
        sptIndex blk_b = b >> sk_bits;
        if(blk_a != blk_b) {
            return (blk_a < blk_b) ? -1 : 1;
        }
        if(a != b) {
            return (a < b) ? -1 : 1;
        }
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using block index as keywords.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
int spt_SparseTensorCompareIndicesRowBlock(
    sptSparseTensor * const tsr1,
    sptNnzIndex loc1,
    sptSparseTensor * const tsr2,
    sptNnzIndex loc2,
    sptElementIndex sk_bits)
{
    assert(tsr1->nmodes == tsr2->nmodes);
    /* Lexicographic comparison of block ids only: every index is reduced
       to its block (index >> sk_bits); elements of the same block chain
       compare equal. */
    for(sptIndex m = 0; m < tsr1->nmodes; ++m) {
        sptIndex blk1 = tsr1->inds[m].data[loc1] >> sk_bits;
        sptIndex blk2 = tsr2->inds[m].data[loc2] >> sk_bits;
        if(blk1 != blk2) {
            return (blk1 < blk2) ? -1 : 1;
        }
    }
    return 0;
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using 2D Z-Morton ordering recursively.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
/* Compare two nonzeros by 2-D Morton block order over the two
   non-excluded modes (mode_order[0], mode_order[1]): block coordinates
   (index >> sb_bits) compare row-major first; within the same block, the
   in-block offsets are bit-interleaved into Morton keys and compared. */
int spt_SparseTensorCompareIndicesMorton2D(
sptSparseTensor * const tsr1,
uint64_t loc1,
sptSparseTensor * const tsr2,
uint64_t loc2,
sptIndex * mode_order,
sptElementIndex sb_bits)
{
assert(tsr1->nmodes == tsr2->nmodes);
uint64_t mkey1 = 0, mkey2 = 0;
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = tsr1->inds[mode_order[0]].data[loc1];
uint32_t y1 = tsr1->inds[mode_order[1]].data[loc1];
uint32_t x2 = tsr2->inds[mode_order[0]].data[loc2];
uint32_t y2 = tsr2->inds[mode_order[1]].data[loc2];
/* Compare block indices */
sptIndex blk_x1 = x1 >> sb_bits;
sptIndex blk_y1 = y1 >> sb_bits;
sptIndex blk_x2 = x2 >> sb_bits;
sptIndex blk_y2 = y2 >> sb_bits;
if(blk_x1 < blk_x2) {
return -1;
} else if(blk_x1 > blk_x2) {
return 1;
} else if(blk_y1 < blk_y2) { // if blk_x1 == blk_x2
return -1;
} else if(blk_y1 > blk_y2) { // if blk_x1 == blk_x2
return 1;
}
/* blk_x1 == blk_x2, blk_y1 == blk_y2, sort inside a block in Z-Morton order */
/* Spread each in-block offset's bits two apart, then interleave x and y.
   NOTE(review): SHIFTS/MASKS are file-scope interleave tables assumed to
   be defined earlier in this file — confirm. */
uint64_t x = x1 - (blk_x1 << sb_bits);
uint64_t y = y1 - (blk_y1 << sb_bits);
x = (x | (x << SHIFTS[3])) & MASKS[3];
x = (x | (x << SHIFTS[2])) & MASKS[2];
x = (x | (x << SHIFTS[1])) & MASKS[1];
x = (x | (x << SHIFTS[0])) & MASKS[0];
y = (y | (y << SHIFTS[3])) & MASKS[3];
y = (y | (y << SHIFTS[2])) & MASKS[2];
y = (y | (y << SHIFTS[1])) & MASKS[1];
y = (y | (y << SHIFTS[0])) & MASKS[0];
mkey1 = y | (x << 1);
/* Same expansion for the second element's in-block offsets. */
x = x2 - (blk_x2 << sb_bits);
y = y2 - (blk_y2 << sb_bits);
x = (x | (x << SHIFTS[3])) & MASKS[3];
x = (x | (x << SHIFTS[2])) & MASKS[2];
x = (x | (x << SHIFTS[1])) & MASKS[1];
x = (x | (x << SHIFTS[0])) & MASKS[0];
y = (y | (y << SHIFTS[3])) & MASKS[3];
y = (y | (y << SHIFTS[2])) & MASKS[2];
y = (y | (y << SHIFTS[1])) & MASKS[1];
y = (y | (y << SHIFTS[0])) & MASKS[0];
mkey2 = y | (x << 1);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support 3-D, 4-D for uint32_t indices.
* When tensor order is large than 5, index ranges are limited.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
/* Compare two nonzeros of 3-D tensors by Z-Morton key, built byte-by-byte
   from the morton256_x/y/z lookup tables (each spreads one byte's bits
   three positions apart; OR-ing the three gives 24 interleaved bits per
   byte level).
   NOTE(review): the accumulated shifts (72, then 48, then 24) move the top
   byte group by 144 bits in total, which exceeds 128 — verify against the
   actual width of sptMortonIndex. */
static int spt_SparseTensorCompareIndicesMorton3D(
sptSparseTensor * const tsr1,
uint64_t loc1,
sptSparseTensor * const tsr2,
uint64_t loc2)
{
sptMortonIndex mkey1 = 0, mkey2 = 0;
assert(tsr1->nmodes == tsr2->nmodes);
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = tsr1->inds[0].data[loc1];
uint32_t y1 = tsr1->inds[1].data[loc1];
uint32_t z1 = tsr1->inds[2].data[loc1];
uint32_t x2 = tsr2->inds[0].data[loc2];
uint32_t y2 = tsr2->inds[1].data[loc2];
uint32_t z2 = tsr2->inds[2].data[loc2];
/* Build mkey1 from the four bytes of (x1, y1, z1), most significant first. */
mkey1 = morton256_z[(z1 >> 24) & 0xFF ] |
morton256_y[(y1 >> 24) & 0xFF ] |
morton256_x[(x1 >> 24) & 0xFF ];
mkey1 = mkey1 << 72 |
morton256_z[(z1 >> 16) & 0xFF ] |
morton256_y[(y1 >> 16) & 0xFF ] |
morton256_x[(x1 >> 16) & 0xFF ];
mkey1 = mkey1 << 48 |
morton256_z[(z1 >> 8) & 0xFF ] |
morton256_y[(y1 >> 8) & 0xFF ] |
morton256_x[(x1 >> 8) & 0xFF ];
mkey1 = mkey1 << 24 |
morton256_z[(z1) & 0xFF ] |
morton256_y[(y1) & 0xFF ] |
morton256_x[(x1) & 0xFF ];
/* Same construction for the second element. */
mkey2 = morton256_z[(z2 >> 24) & 0xFF ] |
morton256_y[(y2 >> 24) & 0xFF ] |
morton256_x[(x2 >> 24) & 0xFF ];
mkey2 = mkey2 << 72 |
morton256_z[(z2 >> 16) & 0xFF ] |
morton256_y[(y2 >> 16) & 0xFF ] |
morton256_x[(x2 >> 16) & 0xFF ];
mkey2 = mkey2 << 48 |
morton256_z[(z2 >> 8) & 0xFF ] |
morton256_y[(y2 >> 8) & 0xFF ] |
morton256_x[(x2 >> 8) & 0xFF ];
mkey2 = mkey2 << 24 |
morton256_z[(z2) & 0xFF ] |
morton256_y[(y2) & 0xFF ] |
morton256_x[(x2) & 0xFF ];
/* Order by the interleaved keys. */
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/**
* compare two indices from two identical or distinct sparse tensors lexicographically, using Z-Morton ordering recursively, freely support arbitrary tensor orders.
* @param tsr1 the first sparse tensor
* @param loc1 the order of the element in the first sparse tensor whose index is to be compared
* @param tsr2 the second sparse tensor
* @param loc2 the order of the element in the second sparse tensor whose index is to be compared
* @return -1 for less, 0 for equal, 1 for greater
*/
static int spt_SparseTensorCompareIndicesMorton4D(
sptSparseTensor * const tsr1,
uint64_t loc1,
sptSparseTensor * const tsr2,
uint64_t loc2)
{
sptMortonIndex mkey1, mkey2;
assert(tsr1->nmodes == tsr2->nmodes);
/* Only support 3-D tensors, with 32-bit indices. */
uint32_t x1 = tsr1->inds[0].data[loc1];
uint32_t y1 = tsr1->inds[1].data[loc1];
uint32_t z1 = tsr1->inds[2].data[loc1];
uint32_t w1 = tsr1->inds[3].data[loc1];
uint32_t x2 = tsr2->inds[0].data[loc2];
uint32_t y2 = tsr2->inds[1].data[loc2];
uint32_t z2 = tsr2->inds[2].data[loc2];
uint32_t w2 = tsr2->inds[3].data[loc2];
static const uint64_t MASKS_64[]={0x5555555555555555, 0x3333333333333333, 0x0F0F0F0F0F0F0F0F, 0x00FF00FF00FF00FF, 0x0000FFFF0000FFFF};
static const uint64_t SHIFTS_64[]= {1, 2, 4, 8, 16};
static sptMortonIndex MASKS_128[] = {
(sptMortonIndex)0x5555555555555555 << 64 | 0x5555555555555555,
(sptMortonIndex)0x3333333333333333 << 64 | 0x3333333333333333,
(sptMortonIndex)0x0F0F0F0F0F0F0F0F << 64 | 0x0F0F0F0F0F0F0F0F,
(sptMortonIndex)0x00FF00FF00FF00FF << 64 | 0x00FF00FF00FF00FF,
(sptMortonIndex)0x0000FFFF0000FFFF << 64 | 0x0000FFFF0000FFFF,
(sptMortonIndex)0x00000000FFFFFFFF << 64 | 0x00000000FFFFFFFF};
static const uint64_t SHIFTS_128[]= {1, 2, 4, 8, 16, 32};
// sptMortonIndex tmp_mask = MASKS_128[2];
// printf("tmp_mask: high: %"PRIX64 " ; low: %"PRIX64 " .\n", (uint64_t)(tmp_mask >> 64), (uint64_t)tmp_mask);
uint64_t tmp_64;
sptMortonIndex x, y, z, w;
/**** compute mkey1 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w1;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
// mkey1 = x | (y << 1) | (z << 2) | (w << 3);
mkey1 = w | (z << 1) | (y << 2) | (x << 3);
/**** compute mkey2 ****/
/* compute correct x, 32bit -> 64bit first */
tmp_64 = x2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct x, 64bit -> 128bit */
x = tmp_64;
x = (x | (x << SHIFTS_128[5])) & MASKS_128[5];
x = (x | (x << SHIFTS_128[4])) & MASKS_128[4];
x = (x | (x << SHIFTS_128[3])) & MASKS_128[3];
x = (x | (x << SHIFTS_128[2])) & MASKS_128[2];
x = (x | (x << SHIFTS_128[1])) & MASKS_128[1];
x = (x | (x << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct y, 32bit -> 64bit first */
tmp_64 = y2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct y, 64bit -> 128bit */
y = tmp_64;
y = (y | (y << SHIFTS_128[5])) & MASKS_128[5];
y = (y | (y << SHIFTS_128[4])) & MASKS_128[4];
y = (y | (y << SHIFTS_128[3])) & MASKS_128[3];
y = (y | (y << SHIFTS_128[2])) & MASKS_128[2];
y = (y | (y << SHIFTS_128[1])) & MASKS_128[1];
y = (y | (y << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct z, 32bit -> 64bit first */
tmp_64 = z2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct z, 64bit -> 128bit */
z = tmp_64;
z = (z | (z << SHIFTS_128[5])) & MASKS_128[5];
z = (z | (z << SHIFTS_128[4])) & MASKS_128[4];
z = (z | (z << SHIFTS_128[3])) & MASKS_128[3];
z = (z | (z << SHIFTS_128[2])) & MASKS_128[2];
z = (z | (z << SHIFTS_128[1])) & MASKS_128[1];
z = (z | (z << SHIFTS_128[0])) & MASKS_128[0];
/* compute correct w, 32bit -> 64bit first */
tmp_64 = w2;
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[4])) & MASKS_64[4];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[3])) & MASKS_64[3];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[2])) & MASKS_64[2];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[1])) & MASKS_64[1];
tmp_64 = (tmp_64 | (tmp_64 << SHIFTS_64[0])) & MASKS_64[0];
/* compute correct w, 64bit -> 128bit */
w = tmp_64;
w = (w | (w << SHIFTS_128[5])) & MASKS_128[5];
w = (w | (w << SHIFTS_128[4])) & MASKS_128[4];
w = (w | (w << SHIFTS_128[3])) & MASKS_128[3];
w = (w | (w << SHIFTS_128[2])) & MASKS_128[2];
w = (w | (w << SHIFTS_128[1])) & MASKS_128[1];
w = (w | (w << SHIFTS_128[0])) & MASKS_128[0];
mkey2 = w | (z << 1) | (y << 2) | (x << 3);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
/****************************
* Quicksort functions
****************************/
/* Task-parallel quicksort of nonzeros [l, r) by 2-D Morton block order
   over the non-excluded modes of mode_order. */
static void spt_QuickSortIndexMorton2D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sb_bits, sptIndex * mode_order)
{
uint64_t i, j, p;
/* Fewer than two elements: already sorted. */
if(r-l < 2) {
return;
}
/* Partition around the middle element; p tracks where the pivot element
   moves so later comparisons still reference the pivot's value. */
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(spt_SparseTensorCompareIndicesMorton2D(tsr, i, tsr, p, mode_order, sb_bits) < 0) {
// printf("(%lu, %lu) result: %d\n", i, p, spt_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p));
++i;
}
while(spt_SparseTensorCompareIndicesMorton2D(tsr, p, tsr, j, mode_order, sb_bits) < 0) {
// printf("(%lu, %lu) result: %d\n", p, j,spt_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j));
--j;
}
if(i >= j) {
break;
}
spt_SwapValues(tsr, i, j);
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
/* Recurse on the left half in a child OpenMP task; the right half runs in
   the current task, then both are joined at the taskwait. */
#pragma omp task firstprivate(l,i) shared(tsr, sb_bits)
{
spt_QuickSortIndexMorton2D(tsr, l, i, sb_bits, mode_order);
}
spt_QuickSortIndexMorton2D(tsr, i, r, sb_bits, mode_order);
#pragma omp taskwait
}
/* Task-parallel quicksort of tsr's nonzeros in [l, r) by 3-D Morton order.
   Same partition-with-pivot-tracking scheme as the 2-D variant above. */
static void spt_QuickSortIndexMorton3D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sb_bits)
{
    uint64_t i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p) < 0) {
            // printf("(%lu, %lu) result: %d\n", i, p, spt_SparseTensorCompareIndicesMorton3D(tsr, i, tsr, p));
            ++i;
        }
        while(spt_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j) < 0) {
            // printf("(%lu, %lu) result: %d\n", p, j,spt_SparseTensorCompareIndicesMorton3D(tsr, p, tsr, j));
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sb_bits)
    {
        spt_QuickSortIndexMorton3D(tsr, l, i, sb_bits);
    }
    spt_QuickSortIndexMorton3D(tsr, i, r, sb_bits);
#pragma omp taskwait
}
/* Task-parallel quicksort of tsr's nonzeros in [l, r) by 4-D Morton order.
   Same partition-with-pivot-tracking scheme as the 2-D variant above. */
static void spt_QuickSortIndexMorton4D(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sb_bits)
{
    uint64_t i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesMorton4D(tsr, i, tsr, p) < 0) {
            // printf("(%lu, %lu) result: %d\n", i, p, spt_SparseTensorCompareIndicesMorton(tsr, i, tsr, p));
            ++i;
        }
        while(spt_SparseTensorCompareIndicesMorton4D(tsr, p, tsr, j) < 0) {
            // printf("(%lu, %lu) result: %d\n", p, j,spt_SparseTensorCompareIndicesMorton(tsr, p, tsr, j));
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sb_bits)
    {
        spt_QuickSortIndexMorton4D(tsr, l, i, sb_bits);
    }
    spt_QuickSortIndexMorton4D(tsr, i, r, sb_bits);
#pragma omp taskwait
}
/* Task-parallel quicksort of [l, r) comparing all modes except one,
   with the leading mode compared at sk_bits row-block granularity. */
static void spt_QuickSortIndexExceptSingleModeRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * const mode_order, sptElementIndex sk_bits)
{
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesExceptSingleModeRowBlock(tsr, i, tsr, p, mode_order, sk_bits) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesExceptSingleModeRowBlock(tsr, p, tsr, j, mode_order, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sk_bits)
    {
        spt_QuickSortIndexExceptSingleModeRowBlock(tsr, l, i, mode_order, sk_bits);
    }
    spt_QuickSortIndexExceptSingleModeRowBlock(tsr, i, r, mode_order, sk_bits);
#pragma omp taskwait
}
/* Task-parallel quicksort of [l, r) by row-block order (sk_bits block bits). */
static void spt_QuickSortIndexRowBlock(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptElementIndex sk_bits)
{
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesRowBlock(tsr, i, tsr, p, sk_bits) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesRowBlock(tsr, p, tsr, j, sk_bits) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, sk_bits)
    {
        spt_QuickSortIndexRowBlock(tsr, l, i, sk_bits);
    }
    spt_QuickSortIndexRowBlock(tsr, i, r, sk_bits);
#pragma omp taskwait
}
/* Task-parallel quicksort of [l, r) by the index values of a single mode. */
static void spt_QuickSortIndexSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex mode)
{
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(tsr->inds[mode].data[i] < tsr->inds[mode].data[p]) {
            ++i;
        }
        while(tsr->inds[mode].data[p] < tsr->inds[mode].data[j]) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, mode)
    {
        spt_QuickSortIndexSingleMode(tsr, l, i, mode);
    }
    spt_QuickSortIndexSingleMode(tsr, i, r, mode);
#pragma omp taskwait
}
/* Insertion sort of tsr's nonzeros in [l, r) using the all-modes-but-one
   comparator (mode_order).  Intended for small ranges; see
   INSERTION_SORT_LENGTH in spt_QuickSortIndexExceptSingleMode().
   Fix: the scan must compare the element being inserted, which after each
   adjacent swap lives at position j+1 — the original compared the fixed
   original position i, which produced wrong orderings once the element
   had moved (e.g. [2,3,1] sorted to [2,1,3]). */
static void spt_InsertSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order)
{
    long int j;
    for(sptNnzIndex i = l; i < r; ++i) {
        j = i - 1;
        /* Bubble the element now at j+1 leftwards until it is no longer
           smaller than its left neighbor. */
        while (j >= 0 && spt_SparseTensorCompareIndicesExceptSingleMode(tsr, j+1, tsr, j, mode_order) < 0)
        {
            /* j and j+1 are adjacent, so swapping costs the same as shifting. */
            spt_SwapValues(tsr, j+1, j);
            -- j;
        }
    }
    return;
}
/* Task-parallel quicksort of [l, r) comparing all modes except one.
   Falls back to insertion sort below INSERTION_SORT_LENGTH elements.
   eleinds_buf is threaded through for the (currently disabled) buffered
   insertion-sort path. */
static void spt_QuickSortIndexExceptSingleMode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, sptIndex * mode_order, sptIndex * eleinds_buf)
{
    sptNnzIndex i, j, p;
    if(r-l < INSERTION_SORT_LENGTH) {
        // eleinds_buf = (sptIndex *)malloc(tsr->nmodes * sizeof(*eleinds_buf));
        spt_InsertSortIndexExceptSingleMode(tsr, l, r, mode_order);
        // free(eleinds_buf);
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, i, tsr, p, mode_order) < 0) {
        // while(spt_SparseTensorCompareIndicesExceptSingleModeCantor(tsr, i, tsr, p, mode_order) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesExceptSingleMode(tsr, p, tsr, j, mode_order) < 0) {
        // while(spt_SparseTensorCompareIndicesExceptSingleModeCantor(tsr, p, tsr, j, mode_order) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task firstprivate(l,i) shared(tsr, mode_order)
    {
        // int tid_tmp = omp_get_thread_num();
        // printf("[%lu, %lu] tid_tmp: %d\n", l, i, tid_tmp);
        spt_QuickSortIndexExceptSingleMode(tsr, l, i, mode_order, eleinds_buf);
    }
    // int tid_tmp = omp_get_thread_num();
    // printf("[%lu, %lu] tid_tmp: %d\n", i, r, tid_tmp);
    spt_QuickSortIndexExceptSingleMode(tsr, i, r, mode_order, eleinds_buf);
#pragma omp taskwait
}
/* Task-parallel quicksort of [l, r) comparing all modes lexicographically.
   NOTE(review): unlike every other variant in this file, both halves are
   spawned as tasks and there is NO taskwait before returning, so this
   function can return while child tasks are still sorting.  The caller
   must reach a task-synchronization point (taskgroup / parallel-region
   barrier) before reading tsr — confirm this is intentional. */
static void spt_QuickSortIndex(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r)
{
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndices(tsr, i, tsr, p) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndices(tsr, p, tsr, j) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task
    { spt_QuickSortIndex(tsr, l, i); }
#pragma omp task
    { spt_QuickSortIndex(tsr, i, r); }
}
/* Task-parallel quicksort of [l, r) comparing num_cmode modes starting at
   cmode_start.
   NOTE(review): the left recursion uses (l, j) while every sibling variant
   uses (l, i); with this partition scheme, when the scans cross with i > j
   the elements in [j, i) are not covered by either recursive call — confirm
   the comparator guarantees they are already in place, or align with the
   (l, i) / (i, r) split used elsewhere.  Also no taskwait before return;
   see spt_QuickSortIndex. */
static void spt_QuickSortIndexCmode(sptSparseTensor *tsr, sptNnzIndex l, sptNnzIndex r, int cmode_start, int num_cmode)
{
    sptNnzIndex i, j, p;
    if(r-l < 2) {
        return;
    }
    p = (l+r) / 2;
    for(i = l, j = r-1; ; ++i, --j) {
        while(spt_SparseTensorCompareIndicesCmode(tsr, i, tsr, p, cmode_start, num_cmode) < 0) {
            ++i;
        }
        while(spt_SparseTensorCompareIndicesCmode(tsr, p, tsr, j, cmode_start, num_cmode) < 0) {
            --j;
        }
        if(i >= j) {
            break;
        }
        spt_SwapValues(tsr, i, j);
        /* Keep tracking the pivot's position across swaps. */
        if(i == p) {
            p = j;
        } else if(j == p) {
            p = i;
        }
    }
#pragma omp task
    { spt_QuickSortIndexCmode(tsr, l, j, cmode_start, num_cmode); }
#pragma omp task
    { spt_QuickSortIndexCmode(tsr, i, r, cmode_start, num_cmode); }
}
/* Lower-bound binary search: return the first position in
   [arrayStart, arrayEnd) whose value is >= target, or arrayEnd if none.
   Fix: `mid` was declared sptNnzIndex while low/high are int, silently
   narrowing on each assignment; keep all cursors the same (int) type to
   match the parameter types. */
sptNnzIndex sptBinarySearch(sptIndex *array, int arrayStart, int arrayEnd, sptIndex target) {
    int low = arrayStart, high = arrayEnd;
    while (low < high) {
        int mid = low + (high - low)/2;   /* overflow-safe midpoint */
        if (array[mid] < target)
            low = mid + 1;
        else
            high = mid;
    }
    return low;
}
unsigned int ht_size;
//Hash table for SPA
/* Allocate a chained hash table with `size` buckets, all initially empty.
   Also records `size` in the file-global ht_size used by htHashCode().
   NOTE(review): neither malloc result is checked; on OOM this crashes at
   the first dereference below — confirm abort-on-OOM is the project policy. */
table_t *htCreate(const unsigned int size){
    table_t *t = ( table_t*)malloc(sizeof( table_t));
    t->size = size;
    ht_size = size;
    t->list = ( node_t**)malloc(sizeof( node_t*)*size);
    unsigned int i;
    for(i=0;i<size;i++)
        t->list[i] = NULL;
    return t;
}
/* Map a 64-bit key to a bucket index in [0, ht_size). */
unsigned int htHashCode(unsigned long long key){
    return (unsigned int)(key % ht_size);
}
/* Overwrite the value stored under `key`.
   Silently does nothing if the key is absent — callers must htInsert()
   the key first. */
void htUpdate( table_t *t, unsigned long long key, sptValue val){
    unsigned int pos = htHashCode(key);
    node_t *list = t->list[pos];
    node_t *temp = list;
    while(temp){
        if(temp->key==key){
            temp->val = val;
            return;
        }
        temp = temp->next;
    }
}
/* Prepend a new (key, val) node to key's bucket.  No duplicate check:
   the newest entry shadows any older one with the same key. */
void htInsert( table_t *t, unsigned long long key, sptValue val){
    unsigned int bucket = htHashCode(key);
    node_t *node = ( node_t*)malloc(sizeof( node_t));
    node->key = key;
    node->val = val;
    node->next = t->list[bucket];
    t->list[bucket] = node;
}
/* Look up `key`; returns its value, or LONG_MIN when absent.
   NOTE(review): LONG_MIN is implicitly converted to sptValue here;
   confirm sptValue can represent it as a distinguishable "not found"
   sentinel for all callers. */
sptValue htGet( table_t *t, unsigned long long key){
    unsigned int pos = htHashCode(key);
    node_t *list = t->list[pos];
    node_t *temp = list;
    while(temp){
        if(temp->key==key){
            return temp->val;
        }
        temp = temp->next;
    }
    return LONG_MIN;
}
/* Release the whole table.
   Fix: also free every chained node in every bucket — the original freed
   only the bucket array and the table struct, leaking all entries ever
   inserted by htInsert(). */
void htFree( table_t *t){
    unsigned int i;
    for(i = 0; i < t->size; i++){
        node_t *temp = t->list[i];
        while(temp){
            node_t *next = temp->next;
            free(temp);
            temp = next;
        }
    }
    free(t->list);
    free(t);
}
unsigned int tensor_ht_size;
/* Allocate a chained tensor hash table with `size` buckets, all empty.
   Records `size` in the file-global tensor_ht_size used by tensor_htHashCode().
   NOTE(review): malloc results are unchecked (crash on OOM) — confirm
   this matches project policy. */
tensor_table_t *tensor_htCreate(const unsigned int size){
    tensor_table_t *t = ( tensor_table_t*)malloc(sizeof( tensor_table_t));
    t->size = size;
    tensor_ht_size = size;
    t->list = ( tensor_node_t**) malloc(sizeof( tensor_node_t*)*size);
    unsigned int i;
    for(i=0;i<size;i++)
        t->list[i] = NULL;
    return t;
}
/* Map a 64-bit key to a bucket index in [0, tensor_ht_size). */
unsigned int tensor_htHashCode(unsigned long long key){
    return (unsigned int)(key % tensor_ht_size);
}
/* Append (key_fmode, value) to the value vector stored under key_cmode.
   Despite the name, this ACCUMULATES rather than replaces.
   Silently does nothing when key_cmode is absent — callers must
   tensor_htInsert() it first. */
void tensor_htUpdate( tensor_table_t *t, unsigned long long key_cmode, unsigned long long key_fmode, sptValue value){
    unsigned int pos = tensor_htHashCode(key_cmode);
    tensor_node_t *list = t->list[pos];
    tensor_node_t *temp = list;
    while(temp){
        if(temp->key==key_cmode){
            tensor_htAppendValueVector(&temp->val, key_fmode, value);
            return;
        }
        temp = temp->next;
    }
}
/* Create a node for key_cmodes holding a fresh value vector seeded with
   (key_fmodes, value), and prepend it to its bucket.  No duplicate check. */
void tensor_htInsert(tensor_table_t *t, unsigned long long key_cmodes, unsigned long long key_fmodes, sptValue value){
    unsigned int bucket = tensor_htHashCode(key_cmodes);
    tensor_node_t *node = ( tensor_node_t*)malloc(sizeof( tensor_node_t));
    node->key = key_cmodes;
    tensor_htNewValueVector(&node->val, 0, 0);
    tensor_htAppendValueVector(&node->val, key_fmodes, value);
    node->next = t->list[bucket];
    t->list[bucket] = node;
}
/* Look up `key`; returns its value vector BY COPY (caller must not free it).
   When absent, returns a struct with len == 0.
   NOTE(review): in the not-found case only `len` is initialized; cap and
   the two pointers are indeterminate — callers must check len before
   touching any other field. */
tensor_value tensor_htGet( tensor_table_t *t, unsigned long long key){
    unsigned int pos = tensor_htHashCode(key);
    tensor_node_t *list = t->list[pos];
    tensor_node_t *temp = list;
    while(temp){
        if(temp->key==key){
            return temp->val;
        }
        temp = temp->next;
    }
    tensor_value result;
    result.len=0;
    return result;
}
/* Release the whole table.
   Fix: also free every chained node and its value vector — the original
   freed only the bucket array and the table struct, leaking every node
   plus each node's key_FM/val heap buffers. */
void tensor_htFree( tensor_table_t *t){
    unsigned int i;
    for(i = 0; i < t->size; i++){
        tensor_node_t *temp = t->list[i];
        while(temp){
            tensor_node_t *next = temp->next;
            tensor_htFreeValueVector(&temp->val);
            free(temp);
            temp = next;
        }
    }
    free(t->list);
    free(t);
}
/* Initialize `vec` with length `len` and capacity max(len, cap, 2);
   both buffers are zero-filled.  Returns 0 on success, -1 on OOM
   (previously malloc results were never checked and a failure crashed
   at the memsets).  calloc also guards the cap*elemsize product. */
int tensor_htNewValueVector(tensor_value *vec, unsigned int len, unsigned int cap) {
    if(cap < len) {
        cap = len;
    }
    if(cap < 2) {
        cap = 2;
    }
    vec->len = len;
    vec->cap = cap;
    vec->key_FM = calloc(cap, sizeof *vec->key_FM);
    vec->val = calloc(cap, sizeof *vec->val);
    if(vec->key_FM == NULL || vec->val == NULL) {
        free(vec->key_FM);
        free(vec->val);
        vec->key_FM = NULL;
        vec->val = NULL;
        vec->len = 0;
        vec->cap = 0;
        return -1;
    }
    return 0;
}
/* Append (key_FM, val) to `vec`, growing capacity by 1.5x when full
   (or by exactly 1 under MEMCHECK_MODE).
   Fix: check each realloc result — the original stored the returned
   pointers unconditionally, so an OOM realloc set the buffer to NULL and
   the writes below dereferenced it.  On partial failure the successfully
   moved buffer is kept so the vector stays consistent.  Returns 0 on
   success, -1 on OOM (vec unchanged in length). */
int tensor_htAppendValueVector(tensor_value *vec, unsigned long long key_FM, sptValue val) {
    if(vec->cap <= vec->len) {
#ifndef MEMCHECK_MODE
        sptNnzIndex newcap = vec->cap + vec->cap/2;
#else
        sptNnzIndex newcap = vec->len+1;
#endif
        unsigned long long *new_key_FM = realloc(vec->key_FM, newcap * sizeof *vec->key_FM);
        sptValue *new_val = realloc(vec->val, newcap * sizeof *vec->val);
        /* Adopt whichever buffers actually moved; the old pointers are
           invalid once realloc succeeds. */
        if(new_key_FM != NULL)
            vec->key_FM = new_key_FM;
        if(new_val != NULL)
            vec->val = new_val;
        if(new_key_FM == NULL || new_val == NULL)
            return -1;
        vec->cap = newcap;
    }
    vec->key_FM[vec->len] = key_FM;
    vec->val[vec->len] = val;
    ++vec->len;
    return 0;
}
/* Reset `vec` to empty and release its storage.
   Fix: null the freed pointers so a stray second free or a re-append
   after re-init cannot dereference/free dangling buffers. */
void tensor_htFreeValueVector(tensor_value *vec) {
    vec->len = 0;
    vec->cap = 0;
    free(vec->key_FM);
    free(vec->val);
    vec->key_FM = NULL;
    vec->val = NULL;
}
wpapsk.h | /*
* This software is Copyright (c) 2012 Lukas Odzioba <lukas dot odzioba at gmail dot com>
* and Copyright (c) 2012-2014 magnum
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* hccap format was introduced by oclHashcat-plus (now renamed to hashcat),
* and it is described here: http://hashcat.net/wiki/hccap
* Code is based on Aircrack-ng source
*/
#ifndef _WPAPSK_H
#define _WPAPSK_H
#include <stdint.h>
#include <assert.h>
#if HAVE_OPENSSL_CMAC_H
#include <openssl/cmac.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "hmacmd5.h"
#include "hmac_sha.h"
#include "sha2.h"
#include "hccap.h"
#define BINARY_SIZE sizeof(mic_t)
#define BINARY_ALIGN 4
#define PLAINTEXT_LENGTH 63 /* We can do 64 but spec. says 63 */
#define SALT_SIZE (sizeof(hccap_t) - sizeof(mic_t))
#define SALT_ALIGN MEM_ALIGN_NONE
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define FORMAT_TAG "$WPAPSK$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
typedef struct
{
unsigned char keymic[16];
} mic_t;
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH + 1];
} wpapsk_password;
typedef struct {
uint32_t v[8];
} wpapsk_hash;
typedef struct {
uint32_t length;
#ifdef JOHN_OCL_WPAPSK
uint8_t eapol[256 + 64];
uint32_t eapol_size;
uint8_t data[64 + 12];
#endif
uint8_t salt[36]; // essid
} wpapsk_salt;
static struct fmt_tests tests[] = {
/* WPA2 testcase from http://wiki.wireshark.org/SampleCaptures */
{"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "Induction"},
{"$WPAPSK$Harkonen#./FgTY0../B4zX6AKFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL.WK3GkF2rXfkPFGGi38MHkHDMbH.sm49Vc3pO4HPSUJE21.5I0.Ec.2........../KFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL..................................................................3X.I.E..1uk2.E..1uk2.E..1uk0.E..................................................................................................................................................................................../t.....U...BIpIs8sePU4r8yNnOxKHfM", "12345678"},
/* WPA (MD5), from aircrack-ng tests */
{"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "biscotte"},
/* Maximum length, 63 characters */
{"$WPAPSK$Greased Lighting#kA5.CDNB.07cofsOMXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGqgvfcXnuF1f7L5fgn4fQMLmDrKjdBNjb6LClRmfLiTYk21.5I0.Ec............7MXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGo.................................................................3X.I.E..1uk2.E..1uk2.E..1uk00...................................................................................................................................................................................../t.....U...D06LUdWVfGPaP1Oa3AV9Hg", "W*A5z&1?op2_L&Hla-OA$#5i_Lu@F+6d?je?u5!6+6766eluu7-l+jOEkIwLe90"},
{"$WPAPSK$hello#JUjQmBbOHUY4RTqMpGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUDMgZfery1qJTHYVn2Faso/kUDDjr3y8gspK7viz8BCJE21.5I0.Ec............/pGc9EjqGdCxMZPWNXBNd1ejNDoFuemrLl27juYlDDUA.................................................................3X.I.E..1uk2.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...9Py59nqygwiar49oOKA3RY", "12345678"},
#if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK)
/* 802.11w with WPA-PSK-SHA256 */
{"$WPAPSK$hello#HY6.hTXZv.v27BkPGuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30LZAA3uaEfy2U2tJQi.VICk4hqn3V5m7W3lNHSJYW5vLE21.5I0.Eg............/GuhkCnLAKxYHlTWYs.4yuqVSNAip3SeixhErtNMV30I.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4....................................................................................................................................................................................../t.....k.../Ms4UxzvlNw5hOM1igIeo6", "password"},
/* 802.11w with WPA-PSK-SHA256, https://github.com/neheb */
{"$WPAPSK$Neheb#g9a8Jcre9D0WrPnEN4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw04ASqHgvo12wJYJywulb6pWM6C5uqiMPNKNe9pkr6LE61.5I0.Eg.2..........1N4QXDbA5NwAy5TVpkuoChMdFfL/8Dus4i/X.lTnfwuw.................................................................3X.I.E..1uk2.E..1uk2.E..1uk4X...................................................................................................................................................................................../t.....k...0sHl.mVkiHW.ryNchcMd4g", "bo$$password"},
#endif
{NULL}
};
/** Below are common variables used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/
static hccap_t hccap; ///structure with hccap data
static wpapsk_salt currentsalt; ///structure for essid
static mic_t *mic; ///table for MIC keys
#ifndef JOHN_OCL_WPAPSK
static wpapsk_password *inbuffer; ///table for candidate passwords
static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU
#endif
static int new_keys = 1;
static char last_ssid[sizeof(hccap.essid)];
/** Below are common functions used by wpapsk_fmt.c and opencl_wpapsk_fmt.c **/
/* Decode a "$WPAPSK$essid#<475 chars>" ciphertext into a static hccap_t.
   The text before '#' is copied into hccap.essid; the remaining
   John-base64 (atoi64 alphabet) characters decode into the binary part of
   the struct starting at offset 36 (past the essid field): 118 groups of
   4 chars -> 3 bytes, plus a final partial group yielding 2 bytes.
   Returns a pointer to static storage, overwritten on every call; a
   zeroed struct is returned when no '#' separator is present.
   Fix: `cap` was computed as hash + 1 BEFORE the hash == NULL check —
   pointer arithmetic on a null pointer is undefined behavior; the
   assignment is now made only after the check. */
static hccap_t *decode_hccap(char *ciphertext)
{
    static hccap_t hccap;
    char *essid = ciphertext + FORMAT_TAG_LEN;
    char *hash = strrchr(ciphertext, '#');
    char *d = hccap.essid;
    char *cap;
    unsigned char tbuf[sizeof(hccap_t)];
    unsigned char *dst = tbuf;
    int i;
    memset(&hccap, 0, sizeof(hccap));
    if (hash == NULL)
        return &hccap;
    cap = hash + 1;
    while (essid != hash) { /* copy essid to hccap */
        *d++ = *essid++;
    }
    *d = '\0';
    assert(*essid == '#');
    /* 118 complete 4-char groups -> 354 bytes */
    for (i = 0; i < 118; i++) {
        dst[0] =
            (atoi64[ARCH_INDEX(cap[0])] << 2) |
            (atoi64[ARCH_INDEX(cap[1])] >> 4);
        dst[1] =
            (atoi64[ARCH_INDEX(cap[1])] << 4) |
            (atoi64[ARCH_INDEX(cap[2])] >> 2);
        dst[2] =
            (atoi64[ARCH_INDEX(cap[2])] << 6) |
            (atoi64[ARCH_INDEX(cap[3])]);
        dst += 3;
        cap += 4;
    }
    /* trailing partial group -> final 2 bytes */
    dst[0] =
        (atoi64[ARCH_INDEX(cap[0])] << 2) |
        (atoi64[ARCH_INDEX(cap[1])] >> 4);
    dst[1] =
        (atoi64[ARCH_INDEX(cap[1])] << 4) |
        (atoi64[ARCH_INDEX(cap[2])] >> 2);
    /* This emits warnings on some compilers */
    //memcpy(&hccap.mac1,tbuf,sizeof(hccap_t)-36);
    memcpy(((char*)&hccap) + 36, tbuf, sizeof(hccap_t) - 36);
#if !ARCH_LITTLE_ENDIAN
    hccap.eapol_size = JOHNSWAP(hccap.eapol_size);
    hccap.keyver = JOHNSWAP(hccap.keyver);
#endif
    return &hccap;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
uint32_t dummy;
} binary;
hccap_t *hccap = decode_hccap(ciphertext);
memcpy(binary.c, hccap->keymic, BINARY_SIZE);
return binary.c;
}
/* Decode the ciphertext and return its salt portion (everything but the
   trailing mic) in static storage. */
static void *get_salt(char *ciphertext)
{
    static hccap_t s;
    hccap_t *decoded = decode_hccap(ciphertext);
    memcpy(&s, decoded, SALT_SIZE);
    return &s;
}
/* Validate a candidate "$WPAPSK$essid#hash" ciphertext:
   tag prefix, essid no longer than 32 chars, exactly 475 chars of the
   John-base64 alphabet after '#', and sane decoded fields (essid length,
   eapol_size, keyver range — keyver 3 only with CMAC support). */
static int valid(char *ciphertext, struct fmt_main *self)
{
    char *hash;
    int hashlength = 0;
    hccap_t *hccap;
    if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
        return 0;
    hash = strrchr(ciphertext, '#');
    if (hash == NULL || hash - (ciphertext + FORMAT_TAG_LEN) > 32)
        return 0;
    hash++;
    /* every remaining char must be in the atoi64 alphabet */
    while (hash < ciphertext + strlen(ciphertext)) {
        if (atoi64[ARCH_INDEX(*hash++)] == 0x7f)
            return 0;
        hashlength++;
    }
    if (hashlength != 475)
        return 0;
    hccap = decode_hccap(ciphertext);
    if (strlen(hccap->essid) > 32) /* real life limit */
        return 0;
    if (hccap->eapol_size > 256)
        return 0;
    if (hccap->keyver < 1)
        return 0;
#if HAVE_OPENSSL_CMAC_H || defined(JOHN_OCL_WPAPSK)
    if (hccap->keyver > 3)
        return 0;
#else
    if (hccap->keyver > 2)
        return 0;
#endif
    return 1;
}
#ifndef JOHN_OCL_WPAPSK
/* WPA pairwise-key expansion (one PRF-512 round): HMAC-SHA1 over
   "Pairwise key expansion" || 0x00 || data(76 bytes) || 0x00 keyed with
   the 32-byte PMK; writes a 20-byte block into ret. */
static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret)
{
    char *text = (char*)"Pairwise key expansion";
    unsigned char buff[100];
    memcpy(buff, text, 22);
    memcpy(buff + 23, data, 76);
    buff[22] = 0;          /* NUL after the label, per the 802.11 PRF */
    buff[76 + 23] = 0;     /* trailing counter byte = 0 (first block only) */
    hmac_sha1((unsigned char*)key, 32, buff, 100, (unsigned char*)ret, 20);
}
#endif
/* Write the two MACs from the global hccap into data[0..11], smaller
   (memcmp order) MAC first, as required for PTK derivation. */
static void insert_mac(uint8_t * data)
{
    if (memcmp(hccap.mac1, hccap.mac2, 6) > 0) {
        memcpy(data, hccap.mac2, 6);
        memcpy(data + 6, hccap.mac1, 6);
    } else {
        memcpy(data, hccap.mac1, 6);
        memcpy(data + 6, hccap.mac2, 6);
    }
}
/* Write the two nonces from the global hccap into data[0..63], smaller
   (memcmp order) nonce first, as required for PTK derivation. */
static void insert_nonce(uint8_t * data)
{
    if (memcmp(hccap.nonce1, hccap.nonce2, 32) > 0) {
        memcpy(data, hccap.nonce2, 32);
        memcpy(data + 32, hccap.nonce1, 32);
    } else {
        memcpy(data, hccap.nonce1, 32);
        memcpy(data + 32, hccap.nonce2, 32);
    }
}
#ifdef WPAPSK_DEBUG
/* Debug helper: format a 6-byte MAC as "AA:BB:CC:DD:EE:FF" in a static
   buffer (overwritten on each call; not thread-safe). */
static char *tomac(unsigned char *p) {
    static char buf[48];
    sprintf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", p[0], p[1], p[2], p[3], p[4], p[5]);
    return buf;
}
/* Debug helper: hex-dump `len` bytes into a static buffer, 32 bytes per
   line with spacing every 4 bytes and a ": " at the half-line mark.
   Static buffer is overwritten on each call; not thread-safe.
   NOTE(review): buf is 1024 bytes with no bounds check — large `len`
   would overflow it; callers in this file pass at most 256. */
static char *hex(unsigned char *p, int len) {
    static char buf[1024];
    char *op=buf;
    int i;
    if (len > 32) {
        do {
            for (i = 0; i < 32; ++i) {
                op += sprintf(op, "%02X", p[i]);
                if (i<31&&i%4==3)
                    op += sprintf(op, " ");
                if (i==15)
                    op += sprintf(op, ": ");
            }
            len -= 32;
            p += 32;
            op += sprintf(op, "\n ");
        } while (len > 32);
    }
    /* remaining tail (< 32 bytes), same formatting */
    for (i = 0; i < len; ++i) {
        op += sprintf(op, "%02X", p[i]);
        if (i<31&&i%4==3)
            op += sprintf(op, " ");
        if (i==15)
            op += sprintf(op, ": ");
    }
    return buf;
}
/* Debug helper: pretty-print every field of the global hccap struct. */
static void Debug_hccap() {
    printf("essid: %s\n", hccap.essid);
    printf("mac1: %s\n", tomac(hccap.mac1));
    printf("mac2: %s\n", tomac(hccap.mac2));
    printf("nonce1: %s\n", hex(hccap.nonce1, 32));
    printf("nonce2: %s\n", hex(hccap.nonce2, 32));
    printf("eapol: %s\n", hex(hccap.eapol, 256));
    printf("epol_sz: %d (0x%02X)\n", hccap.eapol_size, hccap.eapol_size);
    printf("keyver: %d\n", hccap.keyver);
    printf("keymic: %s\n", hex(hccap.keymic, 16));
}
#endif
/* Install a new salt: copy it into the global hccap and stage the essid
   (and, for OpenCL builds, the padded eapol + mac/nonce data) into
   currentsalt.  essid is at most 32 bytes (enforced by valid()), so the
   36-byte strncpy destination is always NUL-terminated.
   Fix: the clEnqueueWriteBuffer line contained HTML-mangled text
   ("¤tsalt") where the extraction corrupted "&currentsalt"; restored. */
static void set_salt(void *salt)
{
    memcpy(&hccap, salt, SALT_SIZE);
    strncpy((char*)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt));
    currentsalt.length = strlen(hccap.essid);
#ifdef JOHN_OCL_WPAPSK
    currentsalt.eapol_size = hccap.eapol_size;
    memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size);
    /* SHA/MD5 padding: 0x80 terminator, zero fill, then the bit length
       in the final word of the last block. */
    memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1);
    memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1);
    if (hccap.keyver == 2)
        alter_endianity(currentsalt.eapol, 256+56);
    ((unsigned int*)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + ((hccap.keyver == 1) ? 14 : 15)] = (64 + hccap.eapol_size) << 3;
    insert_mac(currentsalt.data);
    insert_nonce(currentsalt.data + 12);
    if (hccap.keyver < 3)
        alter_endianity(currentsalt.data, 64 + 12);
    HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu");
#endif
    //Debug_hccap();
}
#ifndef JOHN_OCL_WPAPSK
/* Mark the candidate-password buffer dirty so the next crypt_all
   recomputes all PMKs. */
static void clear_keys(void) {
    new_keys = 1;
}
#undef set_key
/* Store candidate password `key` (truncated to PLAINTEXT_LENGTH bytes,
   not NUL-terminated in the buffer) at slot `index`. */
static void set_key(char *key, int index)
{
    uint8_t len = strlen(key);
    if (len > PLAINTEXT_LENGTH)
        len = PLAINTEXT_LENGTH;
    memcpy(inbuffer[index].v, key, len);
    inbuffer[index].length = len;
    new_keys = 1;
}
/* Return the stored candidate at `index` as a NUL-terminated C string in
   static storage (overwritten on each call). */
static char *get_key(int index)
{
    static char ret[PLAINTEXT_LENGTH + 1];
    const uint8_t n = inbuffer[index].length;
    memcpy(ret, inbuffer[index].v, n);
    ret[n] = '\0';
    return ret;
}
#if HAVE_OPENSSL_CMAC_H
/* Code borrowed from https://w1.fi/wpa_supplicant/ starts */
#define SHA256_MAC_LEN 32
typedef uint16_t u16;
typedef uint8_t u8;
/* Store `val` into a[0..1] in little-endian byte order. */
static inline void WPA_PUT_LE16(u8 *a, u16 val)
{
    a[0] = (u8)(val & 0xff);
    a[1] = (u8)(val >> 8);
}
/* SHA-256 over the concatenation of num_elem (addr[i], len[i]) buffers;
   32-byte digest written to mac.  (Borrowed from wpa_supplicant.) */
static void sha256_vector(size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac)
{
    SHA256_CTX ctx;
    size_t i;
    SHA256_Init(&ctx);
    for (i = 0; i < num_elem; i++) {
        SHA256_Update(&ctx, addr[i], len[i]);
    }
    SHA256_Final(mac, &ctx);
}
/* HMAC-SHA256 of the concatenation of num_elem buffers, keyed with
   (key, key_len); 32-byte MAC written to mac.
   NOTE(review): assumes key_len <= 64 (the memcpy into the 64-byte pad
   has no guard; full wpa_supplicant hashes longer keys first) and
   num_elem <= 5 (the _addr/_len arrays hold 6 entries).  Both hold for
   the 32-byte PMK / 4-element uses in this file — confirm if reused. */
static void hmac_sha256_vector(const u8 *key, size_t key_len, size_t num_elem,
        const u8 *addr[], const size_t *len, u8 *mac)
{
    unsigned char k_pad[64]; /* padding - key XORd with ipad/opad */
    const u8 *_addr[6];
    size_t _len[6], i;
    /* the HMAC_SHA256 transform looks like:
     *
     * SHA256(K XOR opad, SHA256(K XOR ipad, text))
     *
     * where K is an n byte key
     * ipad is the byte 0x36 repeated 64 times
     * opad is the byte 0x5c repeated 64 times
     * and text is the data being protected */
    /* start out by storing key in ipad */
    memset(k_pad, 0, sizeof(k_pad));
    memcpy(k_pad, key, key_len);
    /* XOR key with ipad values */
    for (i = 0; i < 64; i++)
        k_pad[i] ^= 0x36;
    /* perform inner SHA256 */
    _addr[0] = k_pad;
    _len[0] = 64;
    for (i = 0; i < num_elem; i++) {
        _addr[i + 1] = addr[i];
        _len[i + 1] = len[i];
    }
    sha256_vector(1 + num_elem, _addr, _len, mac);
    memset(k_pad, 0, sizeof(k_pad));
    memcpy(k_pad, key, key_len);
    /* XOR key with opad values */
    for (i = 0; i < 64; i++)
        k_pad[i] ^= 0x5c;
    /* perform outer SHA256 */
    _addr[0] = k_pad;
    _len[0] = 64;
    _addr[1] = mac;
    _len[1] = SHA256_MAC_LEN;
    sha256_vector(2, _addr, _len, mac);
}
/* IEEE 802.11 SHA256-based key derivation (counter-mode PRF):
   buf_len_bits of output = HMAC-SHA256(key, LE16(counter) || label ||
   data || LE16(buf_len_bits)) for counter = 1, 2, ...  Unused bits in
   the last octet are masked off.  (Borrowed from wpa_supplicant.) */
static void sha256_prf_bits(const u8 *key, size_t key_len, const char *label,
        const u8 *data, size_t data_len, u8 *buf, size_t buf_len_bits)
{
    u16 counter = 1;
    size_t pos, plen;
    u8 hash[SHA256_MAC_LEN];
    const u8 *addr[4];
    size_t len[4];
    u8 counter_le[2], length_le[2];
    size_t buf_len = (buf_len_bits + 7) / 8;
    addr[0] = counter_le;
    len[0] = 2;
    addr[1] = (u8 *) label;
    len[1] = strlen(label);
    addr[2] = data;
    len[2] = data_len;
    addr[3] = length_le;
    len[3] = sizeof(length_le);
    WPA_PUT_LE16(length_le, buf_len_bits);
    pos = 0;
    while (pos < buf_len) {
        plen = buf_len - pos;
        WPA_PUT_LE16(counter_le, counter);
        if (plen >= SHA256_MAC_LEN) {
            /* full 32-byte block straight into the output */
            hmac_sha256_vector(key, key_len, 4, addr, len, &buf[pos]);
            pos += SHA256_MAC_LEN;
        } else {
            /* final partial block via a scratch buffer */
            hmac_sha256_vector(key, key_len, 4, addr, len, hash);
            memcpy(&buf[pos], hash, plen);
            pos += plen;
            break;
        }
        counter++;
    }
    /*
     * Mask out unused bits in the last octet if it does not use all the
     * bits.
     */
    if (buf_len_bits % 8) {
        u8 mask = 0xff << (8 - buf_len_bits % 8);
        buf[pos - 1] &= mask;
    }
}
#endif /* HAVE_OPENSSL_CMAC_H */
/* Code borrowed from https://w1.fi/wpa_supplicant/ ends */
/* For each of `keys` computed PMKs in outbuffer, derive the PTK and the
   MIC over the captured EAPOL frame into mic[i], dispatched on keyver:
   1 = WPA (HMAC-MD5), 2 = WPA2 (HMAC-SHA1), 3 = 802.11w WPA-PSK-SHA256
   (SHA256 KDF + AES-128-CMAC, only with OpenSSL CMAC support).
   `data` holds the canonically ordered MACs and nonces. */
static void wpapsk_postprocess(int keys)
{
    int i;
    uint8_t data[64 + 12];
    insert_mac(data);
    insert_nonce(data + 12);
    if (hccap.keyver == 1) {
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
        for (i = 0; i < keys; i++) {
            uint32_t prf[20/4];
            HMACMD5Context ctx;
            prf_512(outbuffer[i].v, data, prf); // PTK
            hmac_md5_init_K16((unsigned char*)prf, &ctx);
            hmac_md5_update(hccap.eapol, hccap.eapol_size, &ctx);
            hmac_md5_final(mic[i].keymic, &ctx);
        }
    } else if (hccap.keyver == 2) {
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
        for (i = 0; i < keys; i++) {
            uint32_t prf[20/4];
            prf_512(outbuffer[i].v, data, prf); // PTK
            /* only the first 16 PTK bytes (the KCK) key the MIC */
            hmac_sha1((unsigned char*)prf, 16, hccap.eapol,
                hccap.eapol_size, mic[i].keymic, 16);
        }
#if HAVE_OPENSSL_CMAC_H
    } else if (hccap.keyver == 3) { // 802.11w, WPA-PSK-SHA256
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic)
#endif
        for (i = 0; i < keys; i++) {
            unsigned char ptk[48];
            unsigned char cmic[16];
            size_t miclen;
            CMAC_CTX *ctx;
            sha256_prf_bits((unsigned char*)outbuffer[i].v, 32, "Pairwise key expansion", data, 76, ptk, 48 * 8); // PTK
            // Compute MIC
            ctx = CMAC_CTX_new();
            CMAC_Init(ctx, ptk, 16, EVP_aes_128_cbc(), 0);
            CMAC_Update(ctx, hccap.eapol, hccap.eapol_size);
            CMAC_Final(ctx, cmic, &miclen);
            memcpy(mic[i].keymic, cmic, 16);
            CMAC_CTX_free(ctx);
        }
#endif /* HAVE_OPENSSL_CMAC_H */
    }
}
#endif /* #ifndef JOHN_OCL_WPAPSK */
/* Hash-table key for a stored binary: low PH_MASK_0 bits of the first
   32-bit word of the keymic. */
static int binary_hash_0(void *binary)
{
#ifdef WPAPSK_DEBUG
    puts("binary");
    uint32_t i, *b = binary;
    for (i = 0; i < 4; i++)
        printf("%08x ", b[i]);
    puts("");
#endif
    return ((uint32_t *) binary)[0] & PH_MASK_0;
}
/* Hash-table key for a computed MIC: low PH_MASK_0 bits of its first
   32-bit word (must match binary_hash_0). */
static int get_hash_0(int index)
{
#ifdef WPAPSK_DEBUG
    int i;
    puts("get_hash");
    uint32_t *b = (uint32_t *)mic[index].keymic;
    for (i = 0; i < 4; i++)
        printf("%08x ", b[i]);
    puts("");
#endif
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_0;
}
/* get_hash_1 .. get_hash_6: same as get_hash_0 at progressively wider
   mask resolutions (PH_MASK_1 .. PH_MASK_6), per John's format API. */
static int get_hash_1(int index)
{
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
    uint32_t *h = (uint32_t *) mic[index].keymic;
    return h[0] & PH_MASK_6;
}
/* Return 1 if any of the `count` computed MICs matches `binary` in its
   first 32-bit word (cheap screen before cmp_one). */
static int cmp_all(void *binary, int count)
{
    uint32_t want = ((uint32_t *) binary)[0];
    uint32_t i;
    for (i = 0; i < count; i++) {
        if (((uint32_t *) mic[i].keymic)[0] == want)
            return 1;
    }
    return 0;
}
static int cmp_one(void *binary, int index)
{
uint8_t i;
uint32_t *b = (uint32_t*) binary;
uint32_t *m = (uint32_t*) mic[index].keymic;
for (i = 0; i < BINARY_SIZE / 4; i++)
if (b[i] != m[i])
return 0;
return 1;
}
/* Always a match: cmp_one already compared the full MIC, so no further
   verification is needed. */
static int cmp_exact(char *source, int index)
{
    return 1;
}
/* Ordering for salt sorting: primarily by the essid field (first 36
   bytes, as a bounded string), then by raw bytes over the whole salt. */
static int salt_compare(const void *x, const void *y)
{
    int c = strncmp((const char*)x, (const char*)y, 36);
    if (c) return c;
    return memcmp((const char*)x, (const char*)y, SALT_SIZE);
}
/*
* key version as first tunable cost
* 1=WPA (MD5)
* 2=WPA2 (SHA1)
* 3=802.11w (SHA256)
*/
/* First tunable cost: the salt's key version (1=WPA/MD5, 2=WPA2/SHA1,
   3=802.11w/SHA256). */
static unsigned int get_keyver(void *salt)
{
    const hccap_t *my_salt = salt;
    return (unsigned int) my_salt->keyver;
}
#endif
|
mm-omp-ori.c | /**
*
* Matrix Multiplication - Shared-memory (OpenMP)
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>
int size;
int threads;
typedef struct {
float ** element;
} matrix;
/* Wall-clock time in nanoseconds.  Uses clock_gettime when the build
   defines LINUX, otherwise gettimeofday (microseconds scaled to ns, so
   only microsecond resolution on that path). */
long long wall_clock_time() {
#ifdef LINUX
    struct timespec tp;
    clock_gettime(CLOCK_REALTIME, &tp);
    return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000LL);
#else
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000LL);
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
/**
 * Allocate storage for a size x size matrix: an array of row pointers,
 * then one malloc'd row per index (rows are contiguous individually,
 * not as a whole).  Exits the program on allocation failure.
 **/
void allocate_matrix(matrix* m)
{
    int row;
    float **rows = (float**)malloc(sizeof(float*) * size);
    if (rows == NULL) {
        fprintf(stderr, "Out of memory\n");
        exit(1);
    }
    for (row = 0; row < size; row++) {
        rows[row] = (float*)malloc(sizeof(float) * size);
        if (rows[row] == NULL) {
            fprintf(stderr, "Out of memory\n");
            exit(1);
        }
    }
    m->element = rows;
}
/**
* Free the memory allocated to a matrix.
**/
/**
 * Release every row of M, then the row-pointer array itself.
 **/
void free_matrix(matrix* m) {
    int row;
    for (row = 0; row < size; row++)
        free(m->element[row]);
    free(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
/**
 * Fill M with pseudo-random values in [0, 9] (uses rand(); seed is the
 * caller's responsibility).
 **/
void init_matrix(matrix m)
{
    int row, col;
    for (row = 0; row < size; row++) {
        for (col = 0; col < size; col++)
            m.element[row][col] = rand() % 10;
    }
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
/**
 * Set every element of M to zero.
 **/
void init_matrix_zero(matrix m)
{
    int row, col;
    for (row = 0; row < size; row++) {
        for (col = 0; col < size; col++)
            m.element[row][col] = 0.0;
    }
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
/**
 * Multiplies matrix @a with matrix @b, storing the product in @result.
 * Classic O(n^3) algorithm; rows of the result are computed in parallel
 * (one outer-loop iteration per OpenMP thread; i, j, k private).
 *
 * Fix: the original accumulated with "+=" into result, but its only
 * caller (work) allocates result with malloc and never zeroes it, so the
 * output contained garbage.  Each dot product is now accumulated in a
 * local sum and assigned, making mm independent of result's prior
 * contents (and avoiding repeated memory writes).
 */
void mm(matrix a, matrix b, matrix result)
{
    int i, j, k;
#pragma omp parallel for shared(a, b, result) private (i, j, k)
    for (i = 0; i < size; i++) {
        for (j = 0; j < size; j++) {
            float sum = 0.0f;
            for (k = 0; k < size; k++)
                sum += a.element[i][k] * b.element[k][j];
            result.element[i][j] = sum;
        }
    }
}
/**
 * Print M row by row, each row prefixed with its index.
 **/
void print_matrix(matrix m)
{
    int row, col;
    for (row = 0; row < size; row++) {
        printf("row %4d: ", row);
        for (col = 0; col < size; col++)
            printf("%6.2f ", m.element[row][col]);
        printf("\n");
    }
}
/**
 * Allocate two random size x size matrices, multiply them, and report
 * the elapsed wall-clock time on stderr.
 *
 * Fixes: (1) result is malloc'd, so it must be zero-initialized before
 * mm() accumulates into it with "+=" — the original skipped this and
 * produced garbage output; (2) the three matrices are now freed
 * (previously leaked).
 */
void work()
{
    matrix a, b, result;
    long long before, after;
    // Allocate memory for matrices
    allocate_matrix(&a);
    allocate_matrix(&b);
    allocate_matrix(&result);
    // Initialize matrix elements
    init_matrix(a);
    init_matrix(b);
    init_matrix_zero(result);   /* mm() accumulates into result */
    // Perform parallel matrix multiplication
    before = wall_clock_time();
    mm(a, b, result);
    after = wall_clock_time();
    fprintf(stderr, "Matrix multiplication took %1.2f seconds\n", ((float)(after - before))/1000000000);
    // Print the result matrix
    // print_matrix(result);
    // Release all three matrices
    free_matrix(&a);
    free_matrix(&b);
    free_matrix(&result);
}
/**
 * Entry point: parse optional <size> and <threads> arguments, configure
 * OpenMP, and run one timed multiplication.
 *
 * Fix: atoi results are now validated — a non-numeric or non-positive
 * size fell through as 0/negative loop bounds, and threads == 0 was
 * passed to omp_set_num_threads (invalid); both now fall back to their
 * defaults (1024 / runtime default).
 */
int main(int argc, char ** argv)
{
    srand(0);
    printf("Usage: %s <size> <threads>\n", argv[0]);
    size = (argc >= 2) ? atoi(argv[1]) : 1024;
    if (size <= 0)
        size = 1024;
    threads = (argc >= 3) ? atoi(argv[2]) : -1;
    if (threads <= 0)
        threads = -1;   /* -1 = use the OpenMP runtime default */
    if (threads != -1)
    {
        omp_set_num_threads(threads);
    }
    /* Query the actual team size (every thread stores the same value). */
#pragma omp parallel
    {
        threads = omp_get_num_threads();
    }
    printf("Matrix multiplication of size %d using %d threads\n", size, threads);
    work();
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.