source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
update_ops_named_CZ.c |
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
//void CZ_gate_old_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void CZ_gate_old_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void CZ_gate_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void CZ_gate_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim);
void CZ_gate(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
//CZ_gate_old_single(control_qubit_index, target_qubit_index, state, dim);
//CZ_gate_old_parallel(control_qubit_index, target_qubit_index, state, dim);
//CZ_gate_single(control_qubit_index, target_qubit_index, state, dim);
//CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
//CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
//CZ_gate_parallel(control_qubit_index, target_qubit_index, state, dim);
//return;
#ifdef _USE_SIMD
#ifdef _OPENMP
UINT threshold = 13;
if (dim < (((ITYPE)1) << threshold)) {
CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
}
else {
CZ_gate_parallel_simd(control_qubit_index, target_qubit_index, state, dim);
}
#else
CZ_gate_single_simd(control_qubit_index, target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
UINT threshold = 13;
if (dim < (((ITYPE)1) << threshold)) {
CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
}
else {
CZ_gate_parallel_unroll(control_qubit_index, target_qubit_index, state, dim);
}
#else
CZ_gate_single_unroll(control_qubit_index, target_qubit_index, state, dim);
#endif
#endif
}
void CZ_gate_single_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
const ITYPE mask = target_mask + control_mask;
ITYPE state_index = 0;
if (target_qubit_index == 0 || control_qubit_index==0) {
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
}
}else {
for (state_index = 0; state_index < loop_dim; state_index+=2) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
state[basis_index+1] *= -1;
}
}
}
#ifdef _OPENMP
void CZ_gate_parallel_unroll(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
const ITYPE mask = target_mask + control_mask;
ITYPE state_index = 0;
if (target_qubit_index == 0 || control_qubit_index == 0) {
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
}
}
else {
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; state_index += 2) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
state[basis_index + 1] *= -1;
}
}
}
#endif
#ifdef _USE_SIMD
void CZ_gate_single_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
const ITYPE mask = target_mask + control_mask;
ITYPE state_index = 0;
if (target_qubit_index == 0 || control_qubit_index == 0) {
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
}
}
else {
__m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
for (state_index = 0; state_index < loop_dim; state_index += 2) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
double* ptr = (double*)(state + basis_index);
__m256d data = _mm256_loadu_pd(ptr);
data = _mm256_mul_pd(data,minus_one);
_mm256_storeu_pd(ptr, data);
}
}
}
#ifdef _OPENMP
void CZ_gate_parallel_simd(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
const ITYPE mask = target_mask + control_mask;
ITYPE state_index = 0;
if (target_qubit_index == 0 || control_qubit_index == 0) {
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
}
}
else {
__m256d minus_one = _mm256_set_pd(-1, -1, -1, -1);
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; state_index += 2) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
double* ptr = (double*)(state + basis_index);
__m256d data = _mm256_loadu_pd(ptr);
data = _mm256_mul_pd(data, minus_one);
_mm256_storeu_pd(ptr, data);
}
}
}
#endif
#endif
/*
void CZ_gate_old_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << max_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const ITYPE target_mask = 1ULL << target_qubit_index;
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_insert_only_min = insert_zero_to_basis_index(state_index, min_qubit_mask, min_qubit_index);
ITYPE basis_c1t1 = insert_zero_to_basis_index(basis_insert_only_min, max_qubit_mask, max_qubit_index) ^ control_mask ^ target_mask;
state[basis_c1t1] *= -1;
}
}
#ifdef _OPENMP
void CZ_gate_old_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << max_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const ITYPE target_mask = 1ULL << target_qubit_index;
ITYPE state_index;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_insert_only_min = insert_zero_to_basis_index(state_index, min_qubit_mask, min_qubit_index);
ITYPE basis_c1t1 = insert_zero_to_basis_index(basis_insert_only_min, max_qubit_mask, max_qubit_index) ^ control_mask ^ target_mask;
state[basis_c1t1] *= -1;
}
}
#endif
void CZ_gate_single(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
const ITYPE mask = target_mask + control_mask;
ITYPE state_index = 0;
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
}
}
#ifdef _OPENMP
void CZ_gate_parallel(UINT control_qubit_index, UINT target_qubit_index, CTYPE *state, ITYPE dim) {
const ITYPE loop_dim = dim / 4;
const ITYPE target_mask = 1ULL << target_qubit_index;
const ITYPE control_mask = 1ULL << control_qubit_index;
const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
const ITYPE max_qubit_mask = 1ULL << (max_qubit_index - 1);
const ITYPE low_mask = min_qubit_mask - 1;
const ITYPE mid_mask = (max_qubit_mask - 1) ^ low_mask;
const ITYPE high_mask = ~(max_qubit_mask - 1);
const ITYPE mask = target_mask + control_mask;
ITYPE state_index = 0;
#pragma omp parallel for
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_index = (state_index&low_mask)
+ ((state_index&mid_mask) << 1)
+ ((state_index&high_mask) << 2)
+ mask;
state[basis_index] *= -1;
}
}
#endif
*/ |
pagerank.c | #include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <omp.h>
#include "mt19937p.h"
#define g(x, y) (g[y*n+x])
/**
* Pr(x) = (1-d)/n + d*sum_{n in g(n,x)}(Pr(n)/(outdegree n))
* Runs 1 iteration of pagerank
* Returns 1 if done, 0 otherwise
*/
int run_iteration(int n, double d, int* restrict g, double* restrict w)
{
double* restrict wnew = (double*) calloc(n, sizeof(double));
int done = 1;
#pragma omp parallel for shared(g, w, wnew) reduction(&& : done)
for (int i=0; i<n; ++i) {
double sum = 0.0;
for (int j=0; j<n; ++j) {
//find edges pointing toward i
if (g(j,i)) {
//count out degree of j
int jDegree = 0;
for (int k=0; k<n; ++k) {
jDegree += g(j,k);
}
sum += w[j]/(double)jDegree;
}
}
wnew[i] = ((1.0 - d)/(double)n) + (d*sum);
done = fabs(wnew[i] - w[i]) < 1.0/(1000000.0 * (double)n);
}
memcpy(w, wnew, n * sizeof(double));
free(wnew);
return done;
}
/**
*
*/
int pagerank(int n, double d, int* restrict g, double* restrict w)
{
int iterations = 0;
for (int done = 0; !done; ) {
done = run_iteration(n, d, g, w);
iterations++;
}
return iterations;
}
/**
* # The random graph model
*
* Of course, we need to run the shortest path algorithm on something!
* For the sake of keeping things interesting, let's use a simple random graph
* model to generate the input data. The $G(n,p)$ model simply includes each
* possible edge with probability $p$, drops it otherwise -- doesn't get much
* simpler than that. We use a thread-safe version of the Mersenne twister
* random number generator in lieu of coin flips.
*/
int* gen_graph(int n, double p)
{
int* g = calloc(n*n, sizeof(int));
struct mt19937p state;
struct timeval time;
gettimeofday(&time, NULL);
sgenrand((unsigned long)time.tv_usec, &state);
for (int j = 0; j < n; ++j) {
for (int i = 0; i < n; ++i)
g(i, j) = (genrand(&state) < p);
g(j, j) = 0; //no self edges
}
return g;
}
void write_matrix(const char* fname, int n, int* g)
{
FILE* fp = fopen(fname, "w+");
if (fp == NULL) {
fprintf(stderr, "Could not open output file: %s\n", fname);
exit(-1);
}
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j)
fprintf(fp, "%d ", g(i,j));
fprintf(fp, "\n");
}
fclose(fp);
}
void write_weights(const char* fname, int n, double* w)
{
FILE* fp = fopen(fname, "w+");
if (fp == NULL) {
fprintf(stderr, "Could not open output file: %s\n", fname);
exit(-1);
}
for (int i = 0; i < n; ++i) {
fprintf(fp, "%g ", w[i]);
}
fprintf(fp, "\n");
fclose(fp);
}
double checksum(const double* restrict w, int n) {
double sum = 0.0;
for (int i=0; i<n; ++i) {
sum += w[i];
}
return sum;
}
/**
* # The `main` event
*/
const char* usage =
"pagerank.x -- Compute pagerank on a random graph\n"
"Flags:\n"
" - n -- number of nodes (200)\n"
" - p -- probability of including edges (0.05)\n"
" - d -- probability that a user follows a link (0.85)\n"
" - i -- file name where adjacency matrix should be stored (none)\n"
" - o -- file name where output weights should be stored (none)\n";
int main(int argc, char** argv)
{
int n = 200; // Number of nodes
double p = 0.15; // Edge probability
double d = 0.85; // Probability a link is followed
const char* ifname = NULL; // Adjacency matrix file name
const char* ofname = NULL; // Distance matrix file name
// Option processing
extern char* optarg;
const char* optstring = "hn:d:p:o:i:";
int c;
while ((c = getopt(argc, argv, optstring)) != -1) {
switch (c) {
case 'h':
fprintf(stderr, "%s", usage);
return -1;
case 'n': n = atoi(optarg); break;
case 'p': p = atof(optarg); break;
case 'd': d = atof(optarg); break;
case 'o': ofname = optarg; break;
case 'i': ifname = optarg; break;
}
}
// Graph generation + output
int* g = gen_graph(n, p);
if (ifname)
write_matrix(ifname, n, g);
// Generate initial weights
double* w = calloc(n, sizeof(double));
for (int i = 0; i < n; ++i) {
w[i] = 1.0/(double)n;
}
// Time the pagerank code
double t0 = omp_get_wtime();
int iterations = pagerank(n, d, g, w);
double t1 = omp_get_wtime();
//openmp, cores, time, n, iterations, p, d, checksum
printf("openmp, %d, %g, %d, %d, %g, %g, %g\n",
omp_get_max_threads(),
(t1-t0),
n,
iterations,
p,
d,
checksum(w, n));
// Generate output file
if (ofname)
write_weights(ofname, n, w);
// Clean up
free(g);
free(w);
return 0;
}
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: jxyang@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "gather_param.h"
typedef struct
{
int in_shape[4]; // the dim of the input
int axis;
int indices_num;
int dim_size;
} gather_param_t;
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
float* out_ptr = output;
float* in_ptr = input;
int axis = param->axis;
int outer_size = 1;
int inner_size = 1;
int axis_size = param->in_shape[axis];
for (int i = 0; i < axis; i++)
{
outer_size *= param->in_shape[i];
}
for (int i = axis + 1; i < param->dim_size; i++)
{
inner_size *= param->in_shape[i];
}
// #pragma omp parallel for num_threads(num_thread)
for (int outer = 0; outer < outer_size; ++outer)
{
for (int i = 0; i < param->indices_num; i++)
{
memcpy(out_ptr + (outer * param->indices_num + i) * inner_size,
in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size * sizeof(float));
}
}
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct gather_param* gather_param = ( struct gather_param* )ir_node->op.param_mem;
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
op_priv_info->axis = gather_param->axis;
op_priv_info->indices_num = gather_param->indices_num;
/* prerun now */
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct ir_tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
int out_size = input_tensor->elem_num;
// auto in_dim = input_tensor->GetShape().GetDim();
void* input = input_tensor->data;
void* indices_data = indices_tensor->data;
op_priv_info->dim_size = input_tensor->dim_num;
for (int i = 0; i < op_priv_info->dim_size; i++)
{
op_priv_info->in_shape[i] = input_tensor->dims[i];
}
// int indices_num = op_param.indices_num;
void* output = output_tensor->data;
int ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread);
return ret;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
gather_param_t* op_priv_info = ( gather_param_t* )sys_malloc(sizeof(gather_param_t));
if (op_priv_info == NULL)
{
set_tengine_errno(ENOMEM);
return -1;
}
memset(op_priv_info, 0, sizeof(gather_param_t));
exec_node->ops_priv = op_priv_info;
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
sys_free(op_priv_info);
exec_node->ops_priv = NULL;
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
static int reg_gather_ops(void* arg)
{
return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
static int unreg_gather_ops(void* arg)
{
return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
AUTO_REGISTER_OPS(reg_gather_ops);
AUTO_UNREGISTER_OPS(unreg_gather_ops);
|
omp_section_firstprivate.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
int test_omp_section_firstprivate()
{
int sum;
int sum0;
int known_sum;
sum0 = 11;
sum = 7;
#pragma omp parallel
{
#pragma omp sections firstprivate(sum0)
{
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
}
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
}
}
#pragma omp section
{
#pragma omp critical
{
sum = sum + sum0;
}
}
}
}
known_sum = 11 * 3 + 7;
return (known_sum == sum);
} /* end of check_section_firstprivate*/
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_section_firstprivate()) {
num_failed++;
}
}
return num_failed;
}
|
matvec.c | /*
* matvec.c: Example of matrix-vector product in OpenMP.
*
* (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <omp.h>
#include <sys/time.h>
/*
* Memory consumption: O(m * n + n + m)
*/
enum {
m = 20000,
n = 20000
};
void *xmalloc(size_t size)
{
void *p = malloc(size);
if (!p) {
fprintf(stderr, "malloc failed\n");
exit(EXIT_FAILURE);
}
return p;
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
/* matrix_vector_product: Compute matrix-vector product c[m] = a[m][n] * b[n] */
void matrix_vector_product(double *a, double *b, double *c, int m, int n)
{
for (int i = 0; i < m; i++) {
c[i] = 0.0;
for (int j = 0; j < n; j++)
c[i] += a[i * n + j] * b[j];
}
}
/* matrix_vector_product_omp: Compute matrix-vector product c[m] = a[m][n] * b[n] in OpenMP */
void matrix_vector_product_omp(double *a, double *b, double *c, int m, int n)
{
#pragma omp parallel
{
int nthreads = omp_get_num_threads();
int threadid = omp_get_thread_num();
int items_per_thread = m / nthreads;
int lb = threadid * items_per_thread;
int ub = (threadid == nthreads - 1) ? (m - 1) : (lb + items_per_thread - 1);
for (int i = lb; i <= ub; i++) {
c[i] = 0.0;
for (int j = 0; j < n; j++)
c[i] += a[i * n + j] * b[j];
}
}
}
double run_serial()
{
double *a, *b, *c;
// Allocate memory for 2-d array a[m, n]
a = xmalloc(sizeof(*a) * m * n);
b = xmalloc(sizeof(*b) * n);
c = xmalloc(sizeof(*c) * m);
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++)
a[i * n + j] = i + j;
}
for (int j = 0; j < n; j++)
b[j] = j;
double t = wtime();
matrix_vector_product(a, b, c, m, n);
t = wtime() - t;
printf("Elapsed time (serial): %.6f sec.\n", t);
free(a);
free(b);
free(c);
return t;
}
double run_parallel()
{
double *a, *b, *c;
// Allocate memory for 2-d array a[m, n]
a = xmalloc(sizeof(*a) * m * n);
b = xmalloc(sizeof(*b) * n);
c = xmalloc(sizeof(*c) * m);
// Initialize and allocate pages from NUMA-nodes of threads (first-touch policy).
#pragma omp parallel
{
int nthreads = omp_get_num_threads();
int threadid = omp_get_thread_num();
int items_per_thread = m / nthreads;
int lb = threadid * items_per_thread;
int ub = (threadid == nthreads - 1) ? (m - 1) : (lb + items_per_thread - 1);
for (int i = lb; i <= ub; i++) {
for (int j = 0; j < n; j++)
a[i * n + j] = i + j;
c[i] = 0.0;
}
}
for (int j = 0; j < n; j++)
b[j] = j;
double t = wtime();
matrix_vector_product_omp(a, b, c, m, n);
t = wtime() - t;
printf("Elapsed time (parallel): %.6f sec.\n", t);
free(a);
free(b);
free(c);
return t;
}
int main(int argc, char **argv)
{
printf("Matrix-vector product (c[m] = a[m, n] * b[n]; m = %d, n = %d)\n", m, n);
printf("Memory used: %" PRIu64 " MiB\n", (uint64_t)(((double)m * n + m + n) * sizeof(double)) >> 20);
double tser = run_serial();
double tpar = run_parallel();
printf("Speedup: %.2f\n", tser / tpar);
return 0;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
int fail = 0;
INIT();
// **************************
// Series 1: no dist_schedule
// **************************
//
// Test: #iterations == #teams
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute num_teams(512)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute num_teams(256)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute num_teams(256)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// ****************************
// Series 2: with dist_schedule
// ****************************
//
// Test: #iterations == #teams, dist_schedule(1)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,1) num_teams(512)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations == #teams, dist_schedule(#iterations)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,512) num_teams(512)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size
//
ZERO(A);
int ten = 10;
int chunkSize = 512/ten;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,chunkSize) num_teams(512)
for (int i = 0 ; i < 512 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 512 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams, dist_schedule(1)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,1) num_teams(256)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams, dist_schedule(#iterations)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,500) num_teams(256)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size
//
ZERO(A);
ten = 10;
chunkSize = 500/ten;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,chunkSize) num_teams(256)
for (int i = 0 ; i < 500 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 500 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams, dist_schedule(1)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,1) num_teams(256)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams, dist_schedule(#iterations)
//
ZERO(A);
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,123) num_teams(256)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: #iterations < #teams, dist_schedule(#iterations)
//
ZERO(A);
ten = 10;
chunkSize = 123/ten;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute dist_schedule(static,chunkSize) num_teams(256)
for (int i = 0 ; i < 123 ; i++)
{
A[i] += C[i]; // += 1 per position
}
}
for (int i = 0 ; i < 123 ; i++)
if (A[i] != TRIALS) {
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// ****************************
// Series 3: with ds attributes
// ****************************
//
// Test: private
//
ZERO(A); ZERO(B);
double p = 2.0, q = 4.0;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute private(p,q) num_teams(256)
for(int i = 0 ; i < N ; i++) {
p = 2;
q = 3;
A[i] += p;
B[i] += q;
}
}
for(int i = 0 ; i < N ; i++) {
if (A[i] != TRIALS*2) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]);
fail = 1;
}
if (B[i] != TRIALS*3) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: firstprivate
//
ZERO(A); ZERO(B);
p = 2.0, q = 4.0;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute firstprivate(p,q) num_teams(64)
for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
q += 7.0;
A[i] += p;
B[i] += q;
}
}
for(int i = 0 ; i < 128 ; i++) {
if (i % 2 == 0) {
if (A[i] != (2.0+3.0)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
fail = 1;
}
} else {
if (A[i] != (2.0+3.0*2)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0*2)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
fail = 1;
}
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: lastprivate
//
// requires array because scalar would be treated as implicit firstprivate by target
int lastpriv[2] = {-1,-1};
#pragma omp target teams distribute lastprivate(lastpriv) num_teams(10)
for(int i = 0 ; i < omp_get_num_teams() ; i++) {
lastpriv[0] = omp_get_team_num();
}
if(lastpriv[0] != 9) {
printf("lastpriv value is %d and should have been %d\n", lastpriv[0], 9);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// ***************************
// Series 4: with parallel for
// ***************************
//
// Test: simple blocking loop
//
ZERO(A); ZERO(B);
int nte = 32;
int tl = 64;
int blockSize = tl;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute num_teams(nte) thread_limit(tl)
for(int j = 0 ; j < 256 ; j += blockSize) {
#pragma omp parallel for
for(int i = j ; i < j+blockSize; i++) {
A[i] += B[i] + C[i];
}
}
}
for(int i = 0 ; i < 256 ; i++) {
if (A[i] != TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: blocking loop where upper bound is not a multiple of tl*nte
//
ZERO(A); ZERO(B);
nte = 32;
tl = 64;
blockSize = tl;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute num_teams(nte) thread_limit(tl)
for(int j = 0 ; j < 510 ; j += blockSize) {
int ub = (j+blockSize < 510) ? (j+blockSize) : 512;
#pragma omp parallel for
for(int i = j ; i < ub; i++) {
A[i] += B[i] + C[i];
}
}
}
for(int i = 0 ; i < 256 ; i++) {
if (A[i] != TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// **************************
// Series 5: collapse
// **************************
//
// Test: 2 loops
//
double * S = (double*)malloc(N*N*sizeof(double));
double * T = (double*)malloc(N*N*sizeof(double));
double * U = (double*)malloc(N*N*sizeof(double));
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
{
S[i*N+j] = 0.0;
T[i*N+j] = 1.0;
U[i*N+j] = 2.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute collapse(2) map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) num_teams(512)
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
}
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
if (S[i*N+j] != TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: 3 loops
//
int M = N/8;
double * V = (double*)malloc(M*M*M*sizeof(double));
double * Z = (double*)malloc(M*M*M*sizeof(double));
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
{
V[i*M*M+j*M+k] = 2.0;
Z[i*M*M+j*M+k] = 3.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute collapse(3) map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) num_teams(512)
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
}
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
return 0;
}
|
openmp_matmul.c | /*
* Sample program to test runtime of simple matrix multiply
* with and without OpenMP on gcc-4.3.3-tdm1 (mingw)
*
* (c) 2009, Rajorshi Biswas
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <omp.h>
int main(int argc, char **argv)
{
int i,j,k;
int n;
double temp;
double start, end, run;
n = 100;
int **arr1 = malloc( sizeof(int*) * n);
int **arr2 = malloc( sizeof(int*) * n);
int **arr3 = malloc( sizeof(int*) * n);
for(i=0; i<n; ++i) {
arr1[i] = malloc( sizeof(int) * n );
arr2[i] = malloc( sizeof(int) * n );
arr3[i] = malloc( sizeof(int) * n );
}
printf("Populating array with random values...\n");
srand( time(NULL) );
for(i=0; i<n; ++i) {
for(j=0; j<n; ++j) {
arr1[i][j] = (rand() % n);
arr2[i][j] = (rand() % n);
}
}
printf("Completed array init.\n");
printf("Crunching without OMP...");
fflush(stdout);
start = omp_get_wtime();
for(i=0; i<n; ++i) {
for(j=0; j<n; ++j) {
temp = 0;
for(k=0; k<n; ++k) {
temp += arr1[i][k] * arr2[k][j];
}
arr3[i][j] = temp;
}
}
end = omp_get_wtime();
printf(" took %f seconds.\n", end-start);
printf("Crunching with OMP...");
fflush(stdout);
start = omp_get_wtime();
#pragma omp parallel for private(i, j, k, temp) num_threads(4)
for(i=0; i<n; ++i) {
for(j=0; j<n; ++j) {
temp = 0;
for(k=0; k<n; ++k) {
temp += arr1[i][k] * arr2[k][j];
}
arr3[i][j] = temp;
}
}
end = omp_get_wtime();
printf(" took %f seconds.\n", end-start);
return 0;
}
|
flat_blas_l1.c | /*******************************************************************************
* Copyright 2019 UChicago Argonne, LLC.
* (c.f. AUTHORS, LICENSE)
*
* This file is part of the AML project.
* For more info, see https://github.com/anlsys/aml
*
* SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/
/*
* This is a benchmark for the BLAS Level 1 operations for AML.
*/
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include "aml.h"
#include "aml/area/linux.h"
#include "blas_l1_kernel.h"
#include "utils.h"
#include "verify_blas_l1.h"
/* Look into another way to define these parameters */
#define DEFAULT_ARRAY_SIZE (1UL << 15)
#ifdef NTIMES
#if NTIMES <= 1
#define NTIMES 10
#endif
#endif
#ifndef NTIMES
#define NTIMES 10
#endif
#define OFFSET 0
#ifndef MIN
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef MAX
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#endif
static double *a, *b, *c;
typedef double (*r)(size_t, double *, double *, double *, double);
r run_f[8] = {&dcopy, &dscal, &daxpy, &dasum, &ddot, &dnrm2, &dswap, &idmax};
v verify_f[8] = {&verify_dcopy, &verify_dscal, &verify_daxpy, &verify_dasum,
&verify_ddot, &verify_dnrm2, &verify_dswap, &verify_idmax};
int main(int argc, char *argv[])
{
aml_init(&argc, &argv);
size_t i, j, k;
size_t nb_reps;
size_t memsize;
double dscalar;
long long int timing;
long long int sumtime[10] = {0}, maxtime[10] = {0},
mintime[10] = {LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX,
LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX,
LONG_MAX, LONG_MAX};
char *label[10] = {
"Copy: ", "Scale: ", "Triad: ", "Asum: ",
"Dot: ", "Norm: ", "Swap: ", "Max ID: ",
"RotP: ", "RotM: "};
if (argc == 1) {
memsize = DEFAULT_ARRAY_SIZE;
nb_reps = NTIMES;
} else {
assert(argc == 2);
memsize = 1UL << atoi(argv[1]);
nb_reps = atoi(argv[2]);
}
printf("Each kernel will be executed %ld times.\n", nb_reps);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf("Number of threads required = %li\n", k);
}
}
k = 0;
#pragma omp parallel
#pragma omp atomic
k++;
printf("Number of threads counted = %li\n", k);
size_t size = sizeof(double) * (memsize + OFFSET);
// AML code
struct aml_area *area = &aml_area_linux;
a = aml_area_mmap(area, size, NULL);
b = aml_area_mmap(area, size, NULL);
c = aml_area_mmap(area, size, NULL);
/* MAIN LOOP - repeat test cases nb_reps */
dscalar = 3.0;
double x = 1.0, y = 2.0;
double param[5];
param[0] = -1.0;
for (k = 1; k < 5; k++)
param[k] = k;
double res;
aml_time_t start, end;
for (k = 0; k < nb_reps; k++) {
// Trying this array of functions thing
for (i = 0; i < 8; i++) {
init_arrays(memsize, a, b, c);
aml_gettime(&start);
res = run_f[i](memsize, a, b, c, dscalar);
aml_gettime(&end);
timing = aml_timediff(start, end);
verify_f[i](memsize, a, b, c, dscalar, res);
sumtime[i] += timing;
mintime[i] = MIN(mintime[i], timing);
maxtime[i] = MAX(maxtime[i], timing);
}
// Rotations
init_arrays(memsize, a, b, c);
aml_gettime(&start);
drot(memsize, a, b, x, y);
aml_gettime(&end);
timing = aml_timediff(start, end);
verify_drot(memsize, a, b, c, x, y, res);
sumtime[8] += timing;
mintime[8] = MIN(mintime[i], timing);
maxtime[8] = MAX(maxtime[i], timing);
init_arrays(memsize, a, b, c);
aml_gettime(&start);
drotm(memsize, a, b, param);
aml_gettime(&end);
timing = aml_timediff(start, end);
verify_drotm(memsize, a, b, c, x, y, res);
sumtime[9] += timing;
mintime[9] = MIN(mintime[i], timing);
maxtime[9] = MAX(maxtime[i], timing);
/* Add the rotation generations later, + 2 functions
drotg(x, y, dc, ds);
drotmg(d1, d2, x, y, param);
*/
}
/* SUMMARY */
printf("Function Avg time Min time Max time\n");
for (j = 0; j < 10; j++) {
double avg = (double)sumtime[j] / (double)(nb_reps - 1);
printf("%s\t%11.6f\t%lld\t%lld\n", label[j], avg, mintime[j],
maxtime[j]);
}
/* aml specific code */
aml_area_munmap(area, a, size);
aml_area_munmap(area, b, size);
aml_area_munmap(area, c, size);
aml_finalize();
return 0;
}
|
GB_binop__first_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__first_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__first_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__first_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int32)
// A*D function (colscale): GB (_AxD__first_int32)
// D*A function (rowscale): GB (_DxB__first_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__first_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__first_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_INT32 || GxB_NO_FIRST_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__first_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__first_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__first_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__first_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__first_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__first_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__first_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__first_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__first_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__first_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
Optimizer.h | /*
* Optimizer.h
*
* Created by Guido Novati on 30.10.18.
* Copyright 2018 ETH Zurich. All rights reserved.
*
*/
#pragma once
#include <fstream>
#include "Network.h"
struct MomentumSGD
{
const Real eta;
const Real normalization; // 1/batchSize
const Real beta;
const Real lambda;
MomentumSGD(const Real _eta, // Learning rate
const int batchSize,
const Real _beta1,
const Real _beta2,
const Real _lambda
) : eta(_eta), normalization(1./batchSize), beta(_beta1), lambda(_lambda) {}
// perform gradient update for a parameter array:
inline void step (
const int size, // parameter array's size
Real* const param, // parameter array
Real* const grad, // parameter array gradient
Real* const mom1st, // parameter array gradient 1st moment
Real* const mom2nd // parameter array gradient 2nd moment (unused)
) const
{
// DONE: TODO : Compute Momentum SGD update
#pragma omp parallel for schedule(static)
for (int p= 0; p < size; p++)
{
mom1st[p]= beta * mom1st[p] - eta * normalization * grad[p]; // 1st moment update
param[p] += mom1st[p]; // param (e.g. weights, biases) update
param[p] -= eta * lambda * param[p]; // L2 penalization
}
}
};
template<typename Algorithm>
struct Optimizer
{
Network& NET;
const Real eta, beta_1, beta_2, lambda;
// grab the reference to network weights and parameters
std::vector<Params*> & parms = NET.params;
std::vector<Params*> & grads = NET.grads;
// allocate space to store first (and if needed second) moment of the grad
// which will allow us to learn with momentum:
std::vector<Params*> momentum_1st = NET.allocateGrad();
std::vector<Params*> momentum_2nd = NET.allocateGrad();
// counter of gradient step:
size_t step = 0;
// Constructor:
Optimizer(Network& NN, Real LR = .001, // Learning rate. Should be in range [1e-5 to 1e-2]
Real L2penal = 0, // L2 penalization coefficient. Found by exploration.
Real B1 = .900, // Momentum coefficient. Should be in range [.5 to .9]
Real B2 = .999 // Second moment coefficient. Currently not in use.
) :
NET(NN), eta(LR), beta_1(B1), beta_2(B2), lambda(L2penal) {
}
virtual ~Optimizer() {
for (auto& p : momentum_1st)
_dispose_object(p);
for (auto& p : momentum_2nd)
_dispose_object(p);
}
virtual void update(const int batchSize)
{
assert(parms.size() == grads.size());
assert(parms.size() == momentum_1st.size());
assert(parms.size() == momentum_2nd.size());
// Given some learning algorithm..
const Algorithm algo(eta, batchSize, beta_1, beta_2, lambda);
// ... loop over all parameter arrays and compute the update:
#pragma omp parallel for schedule(static)
for (size_t j = 0; j < parms.size(); j++)
{
if (parms[j] == nullptr) continue; //layer does not have parameters
if (parms[j]->nWeights > 0)
{
algo.step(parms[j]->nWeights,
parms[j]->weights, grads[j]->weights,
momentum_1st[j]->weights, momentum_2nd[j]->weights);
grads[j]->clearWeight(); // reset for next step
}
if (parms[j]->nBiases > 0)
{
algo.step(parms[j]->nBiases,
parms[j]->biases, grads[j]->biases,
momentum_1st[j]->biases, momentum_2nd[j]->biases);
grads[j]->clearBias(); // reset for next step
}
}
step++;
}
};
|
openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main( int argc, char **argv )
{
int thread_id;
int num_threads;
#pragma omp parallel private( num_threads )
{
thread_id = omp_get_thread_num();
printf("\nHello from thread: %d", thread_id );
}
return 0;
}
|
coarsen_grid_mex.c | #include <inttypes.h>
#include <omp.h>
#include "mex.h"
#include "coarsen_grid_mex.h"
void coarsen_grid(uint8_t *G2,
const uint8_t *G, const size_t *sz2, const size_t *sz);
#ifdef COARSEN_GRID_MEX
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs != 2) || (nlhs > 1)) {
mexErrMsgTxt("Usage: coarsen_grid_mex(G2, G);");
return;
}
uint8_t *G2 = (uint8_t *)mxGetData(prhs[0]);
const uint8_t *G = (const uint8_t *)mxGetData(prhs[1]);
const size_t *sz2 = (const size_t *)mxGetDimensions(prhs[0]);
const size_t *sz = (const size_t *)mxGetDimensions(prhs[1]);
coarsen_grid(G2, G, sz2, sz);
if (nlhs == 1) {
plhs[0] = mxCreateDoubleScalar(1.0);
}
return;
}
#endif
void
mx_coarsen_grid(mxArray *mxG2, const mxArray *mxG)
{
uint8_t *G2 = (uint8_t *)mxGetData(mxG2);
const uint8_t *G = (const uint8_t *)mxGetData(mxG);
const size_t *sz = (const size_t *)mxGetDimensions(mxG);
const size_t *sz2 = (const size_t *)mxGetDimensions(mxG2);
coarsen_grid(G2, G, sz2, sz);
return;
}
void
coarsen_grid(uint8_t *G2, const uint8_t *G, const size_t *sz2, const size_t *sz)
{
size_t i2, j2, k2;
size_t l2, lk2;
size_t i, j, k;
size_t l, lk;
uint8_t bi, bj, bk;
const size_t nx = sz[0];
const size_t ny = sz[1];
const size_t nz = sz[2];
const size_t nxny = nx*ny;
const size_t nx2 = sz2[0];
const size_t ny2 = sz2[1];
const size_t nz2 = sz2[2];
const size_t nxny2 = nx2*ny2;
const size_t NX = nx-2;
const size_t NY = ny-2;
const size_t NZ = nz-2;
const size_t NX2 = nx2-1;
const size_t NY2 = ny2-1;
const size_t NZ2 = nz2-1;
/* offset indices */
const size_t o110 = 1 + nx + 0;
const size_t o101 = 1 + 0 + nxny;
const size_t o011 = 0 + nx + nxny;
const size_t o111 = 1 + nx + nxny;
#pragma omp parallel for private(l2) schedule(static) \
if (nxny2*nz2 > 64*64*64)
for(l2 = 0; l2 < nxny2*nz2; ++l2) {
G2[l2] = 0;
}
#pragma omp parallel for private(i2,j2,k2,lk2,l2,i,j,k,lk,l,bi,bj,bk) \
schedule(static)
for(k2 = 1; k2 < NZ2; ++k2) {
k = (k2<<1)-1;
lk = nxny*k;
lk2 = nxny2*k2;
for(j2 = 1; j2 < NY2; ++j2) {
j = (j2<<1)-1;
l = 1 + nx*j + lk;
l2 = 1 + nx2*j2 + lk2;
for(i2 = 1; i2 < NX2; ++i2, ++l2, l += 2) {
i = (i2<<1)-1;
bi = !(i < NX);
bj = !(j < NY);
bk = !(k < NZ);
G2[l2] = G[l] &&
(bi || G[l+1]) &&
(bj || G[l+nx]) &&
(bk || G[l+nxny]) &&
(bi || bj || G[l+o110]) &&
(bi || bk || G[l+o101]) &&
(bj || bk || G[l+o011]) &&
(bi || bj || bk || G[l+o111]);
}
}
}
return;
}
|
msgpass_sumprod.c | #define INF 1E20
#include <math.h>
#include "mex.h"
double square(double x) { return x*x; }
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[])
{
double *pr_in;
double offset, sigma, th;
const mwSize *ndims;
double *pr_out;
double *prior;
int i,j,k;
pr_in = (double *)mxGetPr(prhs[0]);
ndims = mxGetDimensions(prhs[0]);
offset = (double)mxGetScalar(prhs[1]);
sigma = (double)mxGetScalar(prhs[2]);
th = (double)mxGetScalar(prhs[3]);
plhs[0] = mxCreateNumericArray(3,ndims,mxDOUBLE_CLASS,mxREAL);
pr_out = (double *)mxGetPr(plhs[0]);
/*
prior = calloc(ndims[0]*ndims[1]*ndims[2],sizeof(double));
for(i=0;i<ndims[0];i++){
for(j=0;j<ndims[1];j++){
for(k=0;k<ndims[2];k++){
prior[i+j*ndims[0]+k*ndims[0]*ndims[1]] =
exp( - square(sqrt(square(i)+square(j)+square(k)) - offset) / (2*square(sigma)));
}
}
}
*/
#pragma omp parallel for private(i,j,k)
for(i=0;i<ndims[0];i++){
for(j=0;j<ndims[1];j++){
for(k=0;k<ndims[2];k++){
int ind1,ind2;
int x,y,z;
int xmin,xmax,ymin,ymax,zmin,zmax;
double dx,dy,dz;
double lmax,lmin;
double pr;
ind1 = i + j*ndims[0] + k*ndims[0]*ndims[1];
pr = 0;
xmin = (int)MAX(floor((double)i-offset-th),0);
xmax = (int)MIN(ceil((double)i+offset+th),ndims[0]-1);
for(x=xmin;x<=xmax;x++){
dx = fabs((double)(x-i));
lmax = sqrt(square(offset+th)-square(dx));
ymin = (int)MAX(floor((double)j-lmax),0);
ymax = (int)MIN(ceil((double)j+lmax),ndims[1]-1);
for(y=ymin;y<=ymax;y++){
dy = fabs((double)(y-j));
lmax = ceil(sqrt(MAX(square(offset+th)-square(dx)-square(dy),0)));
lmin = floor(sqrt(MAX(square(offset-th)-square(dx)-square(dy),0)));
zmin = (int)((double)k+lmin);
zmax = (int)MIN(((double)k+lmax),ndims[2]-1);
for(z=zmin;z<=zmax;z++){
dz = fabs((double)(z-k));
ind2 = x + y*ndims[0] + z*ndims[0]*ndims[1];
pr = pr + pr_in[ind2];
}
zmin = (int)MAX(((double)k-lmax),0);
zmax = (int)((double)k-lmin);
for(z=zmin;z<=zmax;z++){
dz = fabs((double)(z-k));
ind2 = x + y*ndims[0] + z*ndims[0]*ndims[1];
pr = pr + pr_in[ind2];
}
}
}
pr_out[ind1] = pr;
}
}
}
}
|
inneronly1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
//Example with loop-carried data dependence at the outer level loop.
#include <string.h>
int main(int argc,char *argv[])
{
int i;
int j;
double a[20][20];
memset(a,0,(sizeof(a)));
for (i = 0; i < 20 -1; i += 1) {
#pragma omp parallel for
for (j = 0; j < 20; j += 1) {
a[i][j] += a[i + 1][j];
}
}
return 0;
}
|
tmp2.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose_avx512.h"
#include "ukr10x2vCnnb1f1024x17y17c512r1s1.h"
#include "ukr10x2vGemmb1f1024x17y17c512r1s1AS.h"
#include "ukr9x2vGemmb1f1024x17y17c512r1s1.h"
void testrun(float* A ,float*B, float*C, float*oriB ){
#pragma omp parallel num_threads(18)
{
int tid = omp_get_thread_num();
int Nx = 17;
int Ny = 17;
int Nh = 1;
int Astrides[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
int b1 = 0;
for (int fpck = (tid%18)*16; fpck < uNf; fpck+=18*16){
for(int cwh = (tid/18)*16; cwh < uNc*uNw*uNh/16*16; cwh+=16*1){
transpose16x16_avx512(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier// begin push button generated block
for(int c5=0;c5<512+0;c5+=512)
{
for(int xy5=0;xy5<289+0;xy5+=289)
{
for(int f5=0;f5<1024+0;f5+=1024)
{
for(int c4=c5;c4<min(512, 512+c5);c4+=512)
{
for(int xy4=xy5;xy4<min(289, 289+xy5);xy4+=289)
{
for(int f4=f5;f4<min(1024, 1024+f5);f4+=1024)
{
for(int c3=c4;c3<min(512, 512+c4);c3+=128)
{
for(int f3=f4+tid%18*32;f3<min(1024, 1024+f4);f3+=32*18)
{
for(int xy3=xy4;xy3<min(289, 289+xy4);xy3+=270)
{
for(int xy2=xy3;xy2<min(289, 270+xy3);xy2+=10)
{
for(int f2=f3;f2<min(1024, 32+f3);f2+=32)
{
for(int c2=c3;c2<min(512, 128+c3);c2+=128)
{
for(int c1=c2;c1<min(512, 128+c2);c1+=128)
{
for(int xy1=xy2;xy1<min(289, 10+xy2);xy1+=10)
{
for(int f1=f2;f1<min(1024, 32+f2);f1+=32)
{
int ctile=min(128, 512-c1);
int x1=xy1/17;
int y1=xy1%17/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*147968+c1_1*289+1*x1*17+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*8192+c1*16+0*16+0*16+kf1_2*1;
int offsetC=0+b1*295936+of1_1*289+x1*17+y1*1+of1_2*1;
if(17-y1>=10){
ukr10x2vCnnb1f1024x17y17c512r1s1(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(17*17-xy1>=10){
for(int sti=17-y1;sti<10;sti+=1)
{
Astrides[sti]+=0;
}
ukr10x2vGemmb1f1024x17y17c512r1s1AS(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=17-y1;sti<10;sti+=1)
{
Astrides[sti]-=0;
}
}
else{
ukr9x2vGemmb1f1024x17y17c512r1s1(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
}} |
p6.c | /*
* ATMS 502 / CSE 566 -- Spring, 2016
* pgm6
* >>>>> Linjian Ma <<<<<
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/*#include <ncarg/ncargC.h>
#include <ncarg/gks.h>
#define IWTYPE 1
#define WKID 1*/
main()
{
/*
* Definitions
* NX is the number of physical grid points - Not including 'ghost points'
* BC_WIDTH is the number of ghost points - on each side of the physical grid
* I1 is the first C index to use - the left-most physical point
* I2 is the last C index to use - the right-most physical point
* NXDIM is the total array size, including grid points; use for declarations
* MAXSTEP is the maximum number of time steps to take
* "c" is [constant] flow speed (e.g. meters/sec) - used only in linear case
* "dx" is grid spacing: distance (e.g. meters) between grid points in X, Y
* "name" is a variable containing your name - used to label the plots.
*/
#define NX 300
#define NY 300
#define NZ 75
#define BC_WIDTH 2
#define I1 BC_WIDTH
#define I2 I1+NX-1
#define J1 BC_WIDTH
#define J2 J1+NY-1
#define K1 BC_WIDTH
#define K2 K1+NZ-1
#define NXDIM NX+2*BC_WIDTH
#define NYDIM NY+2*BC_WIDTH
#define NZDIM NZ+2*BC_WIDTH
#define x0 0
#define y0 0
#define z0 0
#define MAXSTEP 1000
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
float dx = 50;
float dy = 50;
float dz = 50;
float dt = 0.1;
float tstep;
int nstep;
int nplot;
char *name = "Linjian Ma";
/* Variables for runtime timing */
time_t tstart,time1,time2,tlast[10],elapsed;
float estimate1,estimate2;
int min1,sec1,min2,sec2;
tstart = time(0);
/* Arrays and other variables */
float theta[NXDIM][NYDIM][NZDIM],theta_d2[NXDIM][NYDIM][NZDIM],theta_d1[NXDIM][NYDIM][NZDIM];
float p1[NXDIM][NYDIM][NZDIM],p2[NXDIM][NYDIM][NZDIM],p3[NXDIM][NYDIM][NZDIM];
float thetaplot[NX][NY][NZ],pplot[NX][NY][NZ];
float pplot_xz[NX][NZ],pplot_xy[NX][NY],pplot_yz[NY][NZ];
float u1[I2+2][NYDIM][NZDIM],u2[I2+2][NYDIM][NZDIM],u3[I2+2][NYDIM][NZDIM];
float v1[NXDIM][J2+2][NZDIM],v2[NXDIM][J2+2][NZDIM],v3[NXDIM][J2+2][NZDIM];
float w1[NXDIM][NYDIM][K2+2],w2[NXDIM][NYDIM][K2+2],w3[NXDIM][NYDIM][K2+2];
float uplot[NX][NY][NZ],vplot[NX][NY][NZ],wplot[NX][NY][NZ];
float rho[K2+2];
/*int delta_theta[2],xstart[2],zstart[2],xradius[2],zradius[2];*/
float K_u,K_v,K_w,K_theta;
float q_av[MAXSTEP],qmin[MAXSTEP],qmax[MAXSTEP],courant,qtrue_av,qtrue_max,qtrue_min;
int i,j,k,n;
int method = 1;
char label[200];
float cor_max,err_t,err_dissip,err_disper;
float cint = 2.0;
int colors = 0;
int pltzero = -1;
float angh = 45.0;
float angv = 20.0;
/* Parameters and input .................................... */
printf("Program #6 Numerical Fluid Dynamics, Spring 2016\n\n");
printf("NX=%d, BC_WIDTH=%d, I1=%d, I2=%d, NXDIM=%d\n",NX,BC_WIDTH,I1,I2,NXDIM);
printf("NY=%d, BC_WIDTH=%d, J1=%d, J2=%d, NYDIM=%d\n",NY,BC_WIDTH,J1,J2,NYDIM);
printf("NZ=%d, BC_WIDTH=%d, K1=%d, K2=%d, NZDIM=%d\n",NZ,BC_WIDTH,K1,K2,NZDIM);
printf("dx=%8.5f, dy=%8.5f,dx=%8.5f, dt=%8.5f, nstep=%d, nplot=%d\n",dx,dy,dz,dt,nstep,nplot);
printf(" Enter the number of steps \n");
scanf("%d",&nstep);
printf(" Enter the plotting interval \n");
scanf("%d",&nplot);
/*printf(" Enter the diffusivity of u \n");
scanf("%d",&K_u);
printf(" Enter the diffusivity of w \n");
scanf("%d",&K_w);
printf(" Enter the diffusivity of theta \n");
scanf("%d",&K_theta);*/
K_u = 40.0;
K_v = 40.0;
K_w = 40.0;
K_theta = 5.0;
/* Definitions for routines */
void ic(),stats(),plot1d(),bc(),integrate(),update(),advec1d(),advection(),PGF(),error(),diffusion(),putfield();
/* Set and plot the initial condition*/
n = 0;
ic(rho,theta_d1,u1,v1,w1,p1,dx,dy,dz,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,x0,y0,z0,BC_WIDTH);
bc(theta_d1,p1,u1,v1,w1,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,BC_WIDTH);
for (i=I1;i<=I2+1;i++) /*u2 = u1*/
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
u2[i][j][k] = u1[i][j][k];
}
for (i=I1-1;i<=I2+1;i++) /*v2 = v1*/
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
v2[i][j][k] = v1[i][j][k];
}
for (i=I1-1;i<=I2+1;i++) /*w2 = w1*/
for (j=J1-1;j<=J2+1;j++)
for (k=K1;k<=K2+1;k++)
{
w2[i][j][k] = w1[i][j][k];
}
for (i=I1-1;i<=I2+1;i++) /*p2 = p1*/
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
p2[i][j][k] = p1[i][j][k];
}
/* plot the initial condition */
/* ... NOTE: pass field name in double quotes (string, not char expected) */
for (i=I1;i<=I2;i++)
for (j=J1;j<=J2;j++)
for (k=K1;k<=K2;k++)
{
thetaplot[i-I1][j-J1][k-K1] = theta_d1[i][j][k];
pplot[i-I1][j-J1][k-K1] = p1[i][j][k];
uplot[i-I1][j-J1][k-K1] = (u1[i][j][k]+u1[i+1][j][k])/2;
vplot[i-I1][j-J1][k-K1] = (v1[i][j][k]+v1[i][j+1][k])/2;
wplot[i-I1][j-J1][k-K1] = (w1[i][j][k]+w1[i][j][k+1])/2;
}
putfield("T",dt*(float)n,thetaplot,NX,NY,NZ);
putfield("P",dt*(float)n,pplot,NX,NY,NZ);
putfield("U",dt*(float)n,uplot,NX,NY,NZ);
putfield("V",dt*(float)n,vplot,NX,NY,NZ);
putfield("W",dt*(float)n,wplot,NX,NY,NZ);
/* Integrate */
for (n=1; n<=nstep; n++) {
if (n == 1)
{
tstep = dt;
}
else
{
tstep = 2*dt;
}
bc(theta_d1,p1,u1,v1,w1,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,BC_WIDTH);
bc(theta_d1,p2,u2,v2,w2,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,BC_WIDTH);
#pragma omp parallel for shared(p3,p1) private(i,j,k)
for (i=I1-1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
p3[i][j][k] = p1[i][j][k];
}
#pragma omp parallel for shared(theta_d2,theta_d1) private(i,j,k)
for (i=I1-2;i<=I2+2;i++)
for (j=J1-2;j<=J2+2;j++)
for (k=K1-2;k<=K2+2;k++)
{
theta_d2[i][j][k] = theta_d1[i][j][k];
}
#pragma omp parallel for shared(u3,u1) private(i,j,k)
for (i=I1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
u3[i][j][k] = u1[i][j][k];
}
#pragma omp parallel for shared(v3,v1) private(i,j,k)
for (i=I1-1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
v3[i][j][k] = v1[i][j][k];
}
#pragma omp parallel for shared(w3,w1) private(i,j,k)
for (i=I1-1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1;k<=K2+1;k++)
{
w3[i][j][k] = w1[i][j][k];
}
/* advect */
advection(theta_d2,p3,u3,u2,v3,v2,w3,w2,dx,dy,dz,dt,tstep,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,BC_WIDTH);
/* diffusion */
diffusion(theta_d2,theta_d1,u3,u1,v3,v1,w3,w1,dx,dy,dz,tstep,dt,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,BC_WIDTH,K_u,K_v,K_w,K_theta);
/* PGF */
PGF(theta_d1,p3,u3,v3,w3,rho,dx,dy,dz,tstep,I1,I2,J1,J2,K1,K2,NXDIM,NYDIM,NZDIM,BC_WIDTH);
/*Return the values back*/
#pragma omp parallel for shared(p1,p2,p3,theta_d1,theta_d2) private(i,j,k)
for (i=I1-1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
p1[i][j][k] = p2[i][j][k];
p2[i][j][k] = p3[i][j][k];
theta_d1[i][j][k] = theta_d2[i][j][k];
}
#pragma omp parallel for shared(u1,u2,u3) private(i,j,k)
for (i=I1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
u1[i][j][k] = u2[i][j][k];
u2[i][j][k] = u3[i][j][k];
}
#pragma omp parallel for shared(v1,v2,v3) private(i,j,k)
for (i=I1-1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1-1;k<=K2+1;k++)
{
v1[i][j][k] = v2[i][j][k];
v2[i][j][k] = v3[i][j][k];
}
#pragma omp parallel for shared(w1,w2,w3) private(i,j,k)
for (i=I1-1;i<=I2+1;i++)
for (j=J1-1;j<=J2+1;j++)
for (k=K1;k<=K2+1;k++)
{
w1[i][j][k] = w2[i][j][k];
w2[i][j][k] = w3[i][j][k];
}
/* plot */
if ( (n % nplot)==0)
{
#pragma omp parallel for shared(thetaplot,pplot,uplot,vplot,wplot,theta_d2,p3,u3,v3,w3) private(i,j,k)
for (i=I1;i<=I2;i++)
for (j=J1;j<=J2;j++)
for (k=K1;k<=K2;k++)
{
thetaplot[i-I1][j-J1][k-K1] = theta_d2[i][j][k];
pplot[i-I1][j-J1][k-K1] = p3[i][j][k];
uplot[i-I1][j-J1][k-K1] = (u3[i][j][k]+u3[i+1][j][k])/2;
vplot[i-I1][j-J1][k-K1] = (v3[i][j][k]+v3[i][j+1][k])/2;
wplot[i-I1][j-J1][k-K1] = (w3[i][j][k]+w3[i][j][k+1])/2;
}
putfield("T",dt*(float)n,thetaplot,NX,NY,NZ);
putfield("P",dt*(float)n,pplot,NX,NY,NZ);
putfield("U",dt*(float)n,uplot,NX,NY,NZ);
putfield("V",dt*(float)n,vplot,NX,NY,NZ);
putfield("W",dt*(float)n,wplot,NX,NY,NZ);
}
/*
* TIMING; report wallclock per-time-step based on
* last 10 steps, and estimates to complete run.
* first estimate is based on last 10 steps,
* second on time since start of run
*/
/*time2 = time(0);
if (n>10) {
time1 = tlast[n%10];
elapsed = time2-time1;
estimate1 = (float)(nstep-n) * ( (float)(time2-time1)/10.0 );
min1 = (int) (estimate1 / 60.0);
sec1 = (int) (estimate1 - 60.0*(float)min1 );
estimate2 = (float)(nstep-n) * ( (float)(time2-tstart) / (float)n );
min2 = (int) (estimate2 / 60.0);
sec2 = (int) (estimate2 - 60.0*(float)min2 );
printf("Step %4d: time/step %5.2f sec, est. time remaining %03d:%02d / %03d:%02d\n",n,(float)elapsed/10.0,min1,sec1,min2,sec2);
}
tlast[n%10] = time2;*/
} /* end of time loop n = 1,...,nstep */
exit;
}
|
GB_unaryop__minv_uint64_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_uint8
// op(A') function: GB_tran__minv_uint64_uint8
// C type: uint64_t
// A type: uint8_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint64_uint8
(
uint64_t *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint64_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_par_in_loop.c | // RUN: %libomp-compile-and-run
//
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#define TYPE long
#define MAX_ITER (TYPE)((TYPE)1000000)
#define EVERY (TYPE)((TYPE)100000)
int main(int argc, char* argv[]) {
TYPE x = MAX_ITER;
omp_set_max_active_levels(2);
omp_set_num_threads(2);
#pragma omp parallel for schedule(nonmonotonic:dynamic,1)
for (TYPE i = 0; i < x; i++) {
int tid = omp_get_thread_num();
omp_set_num_threads(1);
#pragma omp parallel proc_bind(spread)
{
if (i % EVERY == (TYPE)0)
printf("Outer thread %d at iter %ld\n", tid, i);
}
}
printf("passed\n");
return 0;
}
|
dragonfly3_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* The DragonFly BSD 2.10.1-REL crypt-sha2 hashes are seriously broken. See
* http://www.openwall.com/lists/john-dev/2012/01/16/1
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_dragonfly3_32;
extern struct fmt_main fmt_dragonfly3_64;
#elif FMT_REGISTERS_H
john_register_one(&fmt_dragonfly3_32);
john_register_one(&fmt_dragonfly3_64);
#else
#include "sha2.h"
#include <string.h>
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 4096 // tuned on K8-dual HT
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL_32 "dragonfly3-32"
#define FORMAT_LABEL_64 "dragonfly3-64"
#define FORMAT_NAME_32 "DragonFly BSD $3$ w/ bug, 32-bit"
#define FORMAT_NAME_64 "DragonFly BSD $3$ w/ bug, 64-bit"
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#define FORMAT_TAG "$3$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 44
#define BINARY_SIZE 32
#define BINARY_ALIGN 4
#define SALT_SIZE_32 (1+4+8) // 1st char is length
#define SALT_SIZE_64 (1+8+8)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests_32[] = {
{"$3$z$EBG66iBCGfUfENOfqLUH/r9xQxI1cG373/hRop6j.oWs", "magnum"},
{"$3$f6daU5$Xf/u8pKp.sb4VCLKz7tTZMUKJ3J4oOfZgUSHYOFL.M0n", ""},
{"$3$PNPA2tJ$ppD4bXqPMYFVdYVYrxXGMWeYB6Xv8e6jmXbvrB5V.okl", "password"},
{"$3$jWhDSrS$bad..Dy7UAyabPyfrEi3fgQ2qtT.5fE7C5EMNo/n.Qk5", "John the Ripper"},
{"$3$SSYEHO$hkuDmUQHT2Tr0.ai.lUVyb9bCC875Up.CZVa6UJZ.Muv", "DragonFly BSD"},
{"$3$pomO$a2ltqo.LlUSt1DG68sv2FZOdLcul0gYQ3xmn6z0G.I6Y", "123"},
{"$3$F$8Asqp58WwQ3WDMhaR3yQMSJGdCtpBqckemkCSNnJ.gRr", "12345678"},
{NULL}
};
static struct fmt_tests tests_64[] = {
{"$3$z$sNV7KLtLxvJRsj2MfBtGZFuzXP3CECITaFq/rvsy.Y.Q", "magnum"},
{"$3$f6daU5$eV2SX9vUHTMsoy3Ic7cWiQ4mOxyuyenGjYQWkJmy.AF3", ""},
{"$3$PNPA2tJ$GvXjg6zSge3YDh5I35JlYZHoQS2r0/.vn36fQzSY.A0d", "password"},
{"$3$jWhDSrS$5yBH7KFPmsg.PhPeDMj1MY4fv9061zdbYumPe2Ve.Y5J", "John the Ripper"},
{"$3$SSYEHO$AMYLyanRYs8F2U07FsBrSFuOIygJ4kgqvpBB17BI.61N", "DragonFly BSD"},
{"$3$e$TzMK1ePmjnZI/YbGes/1PAKqbj8aOV31Hf8Tz9es.kkq", "123"},
{"$3$XcMa$idKoaBQXdRlhfJFDjnV0jDryW/nEBAGXONyzJvnH.cR3", "12345678"},
{NULL}
};
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)
[(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)];
static char *cur_salt;
static int salt_len;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *pos, *start;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ciphertext += FORMAT_TAG_LEN;
for (pos = ciphertext; *pos && *pos != '$'; pos++);
if (!*pos || pos < ciphertext || pos > &ciphertext[8]) return 0;
start = ++pos;
while (atoi64[ARCH_INDEX(*pos)] != 0x7F) pos++;
if (*pos || pos - start != CIPHERTEXT_LENGTH) return 0;
return 1;
}
#define TO_BINARY(b1, b2, b3) \
value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \
((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \
((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \
((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \
pos += 4; \
out[b1] = value >> 16; \
out[b2] = value >> 8; \
out[b3] = value;
static void *get_binary(char *ciphertext)
{
static uint32_t outbuf[BINARY_SIZE/4];
uint32_t value;
char *pos;
unsigned char *out = (unsigned char*)outbuf;
int i;
pos = strrchr(ciphertext, '$') + 1;
for (i = 0; i < 10; i++) {
TO_BINARY(i, i + 11, i + 21);
}
value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] |
((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) |
((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) |
((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18);
out[10] = value >> 16;
out[31] = value >> 8;
return (void *)out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
static void set_key(char *key, int index)
{
saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA256_CTX ctx;
SHA256_Init(&ctx);
/* First the password */
SHA256_Update(&ctx, saved_key[index], saved_len[index]);
/* Then the salt, including the $3$ magic */
SHA256_Update(&ctx, cur_salt, salt_len);
SHA256_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static void set_salt(void *salt)
{
salt_len = (int)*(char*)salt;
cur_salt = (char*)salt + 1;
}
// For 32-bit version of the bug, our magic is "$3$\0" len 4
static void *get_salt_32(char *ciphertext)
{
static char *out;
int len;
if (!out) out = mem_alloc_tiny(SALT_SIZE_32, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE_32);
ciphertext += FORMAT_TAG_LEN;
strcpy(&out[1], FORMAT_TAG);
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(&out[5], ciphertext, len);
out[0] = len + 4;
return out;
}
// For 64-bit version of the bug, our magic is "$3$\0sha5" len 8
static void *get_salt_64(char *ciphertext)
{
static char *out;
int len;
if (!out) out = mem_alloc_tiny(SALT_SIZE_64, MEM_ALIGN_WORD);
memset(out, 0, SALT_SIZE_64);
ciphertext += FORMAT_TAG_LEN;
memcpy(&out[1], "$3$\0sha5", 8);
for (len = 0; ciphertext[len] != '$'; len++);
memcpy(&out[9], ciphertext, len);
out[0] = len + 8;
return out;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
// Public domain hash function by DJ Bernstein
static int salt_hash(void *salt)
{
unsigned char *s = (unsigned char*)salt + 1;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < *(unsigned char*)salt; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_dragonfly3_32 = {
{
FORMAT_LABEL_32,
FORMAT_NAME_32,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE_32,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL },
{ FORMAT_TAG },
tests_32
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt_32,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
struct fmt_main fmt_dragonfly3_64 = {
{
FORMAT_LABEL_64,
FORMAT_NAME_64,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE_64,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL },
{ NULL },
tests_64
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt_64,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
Example_tasking.18.c | /*
* @@name: tasking.18c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_4.0
*/
#include <stdio.h>
int main()
{
int x = 1;
#pragma omp parallel
#pragma omp single
{
#pragma omp task shared(x) depend(out: x)
x = 2;
#pragma omp task shared(x) depend(in: x)
printf("x + 1 = %d. ", x+1);
#pragma omp task shared(x) depend(in: x)
printf("x + 2 = %d\n", x+2);
}
return 0;
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param start_iteration Start index of the iteration to predict
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_pred_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, max_iteration);
if (num_iteration > 0) {
num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
} else {
num_pred_in_one_row *= (max_iteration - start_iteration);
}
} else if (is_pred_contrib) {
num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_pred_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, num_iteration_for_pred_);
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
} else {
num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
}
start_iteration_for_pred_ = start_iteration;
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char* SubModelName() const override { return "tree"; }
/*!
* \brief Get the trees contained in this boosting class. Used for MOJO writing.
*/
inline const std::vector<std::unique_ptr<Tree>>& GetTrees() const override {
return models_;
}
bool IsLinear() const override { return linear_tree_; }
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
ark_brusselator1D_omp.c | /*---------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds @ SMU
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2022, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a brusselator problem from chemical
* kinetics. This is n PDE system with 3 components, Y = [u,v,w],
* satisfying the equations,
* u_t = du*u_xx + a - (w+1)*u + v*u^2
* v_t = dv*v_xx + w*u - v*u^2
* w_t = dw*w_xx + (b-w)/ep - w*u
* for t in [0, 80], x in [0, 1], with initial conditions
* u(0,x) = a + 0.1*sin(pi*x)
* v(0,x) = b/a + 0.1*sin(pi*x)
* w(0,x) = b + 0.1*sin(pi*x),
* and with stationary boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* v_t(t,0) = v_t(t,1) = 0,
* w_t(t,0) = w_t(t,1) = 0.
* Note: these can also be implemented as Dirichlet boundary
* conditions with values identical to the initial conditions.
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
* This program solves the problem with the DIRK method, using a
* Newton iteration with the band linear solver, and a
* user-supplied Jacobian routine. This example uses the OpenMP
* vector kernel, and employs OpenMP threading within the
* right-hand side and Jacobian construction functions.
*
* 100 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmp.h> /* access to OpenMP N_Vector */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* def. of type 'realtype' */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* accessor macros between (x,v) location and 1D NVector array */
#define IDX(x,v) (3*(x)+v)
/* user data structure */
typedef struct {
sunindextype N; /* number of intervals */
int nthreads; /* number of OpenMP threads */
realtype dx; /* mesh spacing */
realtype a; /* constant forcing on u */
realtype b; /* steady-state value of w */
realtype du; /* diffusion coeff for u */
realtype dv; /* diffusion coeff for v */
realtype dw; /* diffusion coeff for w */
realtype ep; /* stiffness parameter */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(realtype t, N_Vector y, N_Vector fy,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/* Private helper functions */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata);
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
int main(int argc, char *argv[])
{
/* general problem parameters */
realtype T0 = RCONST(0.0); /* initial time */
realtype Tf = RCONST(10.0); /* final time */
int Nt = 100; /* total number of output times */
int Nvar = 3; /* number of solution fields */
UserData udata = NULL;
realtype *data;
sunindextype N = 201; /* spatial mesh size */
realtype a = 0.6; /* problem parameters */
realtype b = 2.0;
realtype du = 0.025;
realtype dv = 0.025;
realtype dw = 0.025;
realtype ep = 1.0e-5; /* stiffness parameter */
realtype reltol = 1.0e-6; /* tolerances */
realtype abstol = 1.0e-10;
sunindextype NEQ, i;
/* general problem variables */
int flag; /* reusable error-checking flag */
N_Vector y = NULL; /* empty vector for storing solution */
N_Vector umask = NULL; /* empty mask vectors for viewing solution components */
N_Vector vmask = NULL;
N_Vector wmask = NULL;
SUNMatrix A = NULL; /* empty matrix for linear solver */
SUNLinearSolver LS = NULL; /* empty linear solver structure */
void *arkode_mem = NULL; /* empty ARKode memory structure */
realtype pi, t, dTout, tout, u, v, w;
FILE *FID, *UFID, *VFID, *WFID;
int iout, num_threads;
long int nst, nst_a, nfe, nfi, nsetups, nje, nfeLS, nni, ncfn, netf;
/* Create the SUNDIALS context object for this simulation */
SUNContext ctx;
flag = SUNContext_Create(NULL, &ctx);
if (check_flag(&flag, "SUNContext_Create", 1)) return 1;
/* allocate udata structure */
udata = (UserData) malloc(sizeof(*udata));
if (check_flag((void *) udata, "malloc", 2)) return 1;
/* set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS environment variable */
#endif
if (argc > 1) /* overwrite with command line value, if supplied */
num_threads = (int) strtol(argv[1], NULL, 0);
/* store the inputs in the UserData structure */
udata->N = N;
udata->a = a;
udata->b = b;
udata->du = du;
udata->dv = dv;
udata->dw = dw;
udata->ep = ep;
udata->nthreads = num_threads;
/* set total allocated vector length */
NEQ = Nvar*udata->N;
/* Initial problem output */
printf("\n1D Brusselator PDE test problem:\n");
printf(" N = %li, NEQ = %li\n", (long int) udata->N, (long int) NEQ);
printf(" num_threads = %i\n", num_threads);
printf(" problem parameters: a = %"GSYM", b = %"GSYM", ep = %"GSYM"\n",
udata->a, udata->b, udata->ep);
printf(" diffusion coefficients: du = %"GSYM", dv = %"GSYM", dw = %"GSYM"\n",
udata->du, udata->dv, udata->dw);
printf(" reltol = %.1"ESYM", abstol = %.1"ESYM"\n\n", reltol, abstol);
/* Initialize vector data structures */
/* Create vector for solution */
y = N_VNew_OpenMP(NEQ, num_threads, ctx);
if (check_flag((void *)y, "N_VNew_OpenMP", 0)) return 1;
/* Create vector masks */
umask = N_VClone(y);
if (check_flag((void *)umask, "N_VClone", 0)) return 1;
vmask = N_VClone(y);
if (check_flag((void *)vmask, "N_VClone", 0)) return 1;
wmask = N_VClone(y);
if (check_flag((void *)wmask, "N_VClone", 0)) return 1;
/* Set initial conditions into y */
udata->dx = RCONST(1.0)/(N-1); /* set spatial mesh spacing */
data = N_VGetArrayPointer(y); /* Access data array for new NVector y */
if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
pi = RCONST(4.0)*atan(RCONST(1.0));
for (i=0; i<N; i++) {
data[IDX(i,0)] = a + RCONST(0.1)*sin(pi*i*udata->dx); /* u */
data[IDX(i,1)] = b/a + RCONST(0.1)*sin(pi*i*udata->dx); /* v */
data[IDX(i,2)] = b + RCONST(0.1)*sin(pi*i*udata->dx); /* w */
}
/* Set mask array values for each solution component */
N_VConst(0.0, umask);
data = N_VGetArrayPointer(umask);
if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) data[IDX(i,0)] = RCONST(1.0);
N_VConst(0.0, vmask);
data = N_VGetArrayPointer(vmask);
if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) data[IDX(i,1)] = RCONST(1.0);
N_VConst(0.0, wmask);
data = N_VGetArrayPointer(wmask);
if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) data[IDX(i,2)] = RCONST(1.0);
/* Initialize matrix and linear solver data structures */
A = SUNBandMatrix(NEQ, 4, 4, ctx);
if (check_flag((void *)A, "SUNBandMatrix", 0)) return 1;
LS = SUNLinSol_Band(y, A, ctx);
if (check_flag((void *)LS, "SUNLinSol_Band", 0)) return 1;
/* Call ARKStepCreate to initialize the ARK timestepper module and
specify the right-hand side function in y'=f(t,y), the inital time
T0, and the initial dependent variable vector y. Note: since this
problem is fully implicit, we set f_E to NULL and f_I to f. */
arkode_mem = ARKStepCreate(NULL, f, T0, y, ctx);
if (check_flag((void *)arkode_mem, "ARKStepCreate", 0)) return 1;
/* Set routines */
flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
flag = ARKStepSStolerances(arkode_mem, reltol, abstol); /* Specify tolerances */
if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
/* Linear solver specification */
flag = ARKStepSetLinearSolver(arkode_mem, LS, A); /* Attach matrix and linear solver */
if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
flag = ARKStepSetJacFn(arkode_mem, Jac); /* Set the Jacobian routine */
if (check_flag(&flag, "ARKStepSetJacFn", 1)) return 1;
/* output spatial mesh to disk */
FID=fopen("bruss_mesh.txt","w");
for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i);
fclose(FID);
/* Open output stream for results, access data arrays */
UFID=fopen("bruss_u.txt","w");
VFID=fopen("bruss_v.txt","w");
WFID=fopen("bruss_w.txt","w");
/* output initial condition to disk */
data = N_VGetArrayPointer(y);
if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
fprintf(UFID,"\n");
fprintf(VFID,"\n");
fprintf(WFID,"\n");
/* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then
prints results. Stops when the final time has been reached */
t = T0;
dTout = (Tf-T0)/Nt;
tout = T0+dTout;
printf(" t ||u||_rms ||v||_rms ||w||_rms\n");
printf(" ----------------------------------------------\n");
for (iout=0; iout<Nt; iout++) {
flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
if (check_flag(&flag, "ARKStepEvolve", 1)) break;
u = N_VWL2Norm(y,umask); /* access/print solution statistics */
u = sqrt(u*u/N);
v = N_VWL2Norm(y,vmask);
v = sqrt(v*v/N);
w = N_VWL2Norm(y,wmask);
w = sqrt(w*w/N);
printf(" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM"\n", t, u, v, w);
if (flag >= 0) { /* successful solve: update output time */
tout += dTout;
tout = (tout > Tf) ? Tf : tout;
} else { /* unsuccessful solve: break */
fprintf(stderr,"Solver failure, stopping integration\n");
break;
}
/* output results to disk */
for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
fprintf(UFID,"\n");
fprintf(VFID,"\n");
fprintf(WFID,"\n");
}
printf(" ----------------------------------------------\n");
fclose(UFID);
fclose(VFID);
fclose(WFID);
/* Print some final statistics */
flag = ARKStepGetNumSteps(arkode_mem, &nst);
check_flag(&flag, "ARKStepGetNumSteps", 1);
flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
flag = ARKStepGetNumJacEvals(arkode_mem, &nje);
check_flag(&flag, "ARKStepGetNumJacEvals", 1);
flag = ARKStepGetNumLinRhsEvals(arkode_mem, &nfeLS);
check_flag(&flag, "ARKStepGetNumLinRhsEvals", 1);
printf("\nFinal Solver Statistics:\n");
printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
printf(" Total linear solver setups = %li\n", nsetups);
printf(" Total RHS evals for setting up the linear system = %li\n", nfeLS);
printf(" Total number of Jacobian evaluations = %li\n", nje);
printf(" Total number of Newton iterations = %li\n", nni);
printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
printf(" Total number of error test failures = %li\n\n", netf);
/* Clean up and return with successful completion */
free(udata); /* Free user data */
ARKStepFree(&arkode_mem); /* Free integrator memory */
SUNLinSolFree(LS); /* Free linear solver */
SUNMatDestroy(A); /* Free matrix */
N_VDestroy(y); /* Free vectors */
N_VDestroy(umask);
N_VDestroy(vmask);
N_VDestroy(wmask);
SUNContext_Free(&ctx); /* Free context */
return 0;
}
/*-------------------------------
* Functions called by the solver
*-------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData udata = (UserData) user_data; /* access problem data */
sunindextype N = udata->N; /* set variable shortcuts */
realtype a = udata->a;
realtype b = udata->b;
realtype ep = udata->ep;
realtype du = udata->du;
realtype dv = udata->dv;
realtype dw = udata->dw;
realtype dx = udata->dx;
realtype *Ydata=NULL, *dYdata=NULL;
realtype uconst, vconst, wconst, u, ul, ur, v, vl, vr, w, wl, wr;
sunindextype i = 0;
/* clear out ydot (to be careful) */
N_VConst(0.0, ydot);
Ydata = N_VGetArrayPointer(y); /* access data arrays */
if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
dYdata = N_VGetArrayPointer(ydot);
if (check_flag((void *)dYdata, "N_VGetArrayPointer", 0)) return 1;
N_VConst(0.0, ydot); /* initialize ydot to zero */
/* iterate over domain, computing all equations */
uconst = du/dx/dx;
vconst = dv/dx/dx;
wconst = dw/dx/dx;
#pragma omp parallel for default(shared) private(i,u,ul,ur,v,vl,vr,w,wl,wr) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++) {
/* set shortcuts */
u = Ydata[IDX(i,0)]; ul = Ydata[IDX(i-1,0)]; ur = Ydata[IDX(i+1,0)];
v = Ydata[IDX(i,1)]; vl = Ydata[IDX(i-1,1)]; vr = Ydata[IDX(i+1,1)];
w = Ydata[IDX(i,2)]; wl = Ydata[IDX(i-1,2)]; wr = Ydata[IDX(i+1,2)];
/* u_t = du*u_xx + a - (w+1)*u + v*u^2 */
dYdata[IDX(i,0)] = (ul - RCONST(2.0)*u + ur)*uconst + a - (w+RCONST(1.0))*u + v*u*u;
/* v_t = dv*v_xx + w*u - v*u^2 */
dYdata[IDX(i,1)] = (vl - RCONST(2.0)*v + vr)*vconst + w*u - v*u*u;
/* w_t = dw*w_xx + (b-w)/ep - w*u */
dYdata[IDX(i,2)] = (wl - RCONST(2.0)*w + wr)*wconst + (b-w)/ep - w*u;
}
/* enforce stationary boundaries */
dYdata[IDX(0,0)] = dYdata[IDX(0,1)] = dYdata[IDX(0,2)] = 0.0;
dYdata[IDX(N-1,0)] = dYdata[IDX(N-1,1)] = dYdata[IDX(N-1,2)] = 0.0;
return 0;
}
/* Jacobian routine to compute J(t,y) = df/dy. */
static int Jac(realtype t, N_Vector y, N_Vector fy,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
UserData udata = (UserData) user_data; /* access problem data */
SUNMatZero(J); /* Initialize Jacobian to zero */
/* Fill in the Laplace matrix */
if (LaplaceMatrix(RCONST(1.0), J, udata)) {
printf("Jacobian calculation error in calling LaplaceMatrix!\n");
return 1;
}
/* Add in the Jacobian of the reaction terms matrix */
if (ReactionJac(RCONST(1.0), y, J, udata)) {
printf("Jacobian calculation error in calling ReactionJac!\n");
return 1;
}
return 0;
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Routine to compute the stiffness matrix from (L*y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata)
{
sunindextype N = udata->N; /* set shortcuts */
realtype dx = udata->dx;
sunindextype i = 0;
realtype uconst = c*udata->du/dx/dx;
realtype vconst = c*udata->dv/dx/dx;
realtype wconst = c*udata->dw/dx/dx;
/* iterate over intervals, filling in Jacobian entries */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++) {
/* Jacobian of (L*y) at this node */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i-1,0)) += uconst;
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i-1,1)) += vconst;
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i-1,2)) += wconst;
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) -= RCONST(2.0)*uconst;
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) -= RCONST(2.0)*vconst;
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) -= RCONST(2.0)*wconst;
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i+1,0)) += uconst;
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i+1,1)) += vconst;
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i+1,2)) += wconst;
}
return 0;
}
/* Routine to compute the Jacobian matrix from R(y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata)
{
sunindextype N = udata->N; /* set shortcuts */
realtype ep = udata->ep;
sunindextype i = 0;
realtype u, v, w;
realtype *Ydata = N_VGetArrayPointer(y); /* access solution array */
if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
/* iterate over nodes, filling in Jacobian entries */
#pragma omp parallel for default(shared) private(i,u,v,w) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++) {
/* set nodal value shortcuts (shifted index due to start at first interior node) */
u = Ydata[IDX(i,0)];
v = Ydata[IDX(i,1)];
w = Ydata[IDX(i,2)];
/* all vars wrt u */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) += c*(RCONST(2.0)*u*v-(w+RCONST(1.0)));
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,0)) += c*(w - RCONST(2.0)*u*v);
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,0)) += c*(-w);
/* all vars wrt v */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,1)) += c*(u*u);
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) += c*(-u*u);
/* all vars wrt w */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,2)) += c*(-u);
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,2)) += c*(u);
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) += c*(-RCONST(1.0)/ep - u);
}
return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return 1; }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
return 0;
}
/*---- end of file ----*/
|
section_1.c | #include<omp.h>
#include<stdio.h>
int main()
{
int arr[5]={1,2,3,4,5};
int array[5]={10,20,30,40,50};
int i;
int result;
#pragma omp parallel sections
{
#pragma omp section
#pragma omp parallel for
for(int i=0;i<5;i++)
{
result = arr[i]+array[i];
printf("Value post addition%d\n",result);
}
#pragma omp section
#pragma omp parallel for
for(int i=0;i<5;i++)
{
result = arr[i]*array[i];
printf("Value post multiplication %d\n",result);
}
}
}
|
GB_unaryop__minv_bool_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_uint64
// op(A') function: GB_tran__minv_bool_uint64
// C type: bool
// A type: uint64_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, x) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_bool_uint64
(
bool *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_bool_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 32;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(32*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(32*t3+Nx+28,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),128*t4+126),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(128*t4,t5+1);
ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
phase.c | /*
* Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <memory.h>
#include <hashTableC.h>
#include <unistd.h>
#include <time.h>
#include "marginVersion.h"
#include "margin.h"
#include "htsIntegration.h"
#include "helenFeatures.h"
/*
* Main functions
*/
void phase_usage() {
fprintf(stderr, "usage: margin phase <ALIGN_BAM> <REFERENCE_FASTA> <VARIANT_VCF> <PARAMS> [options]\n");
fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H);
fprintf(stderr, "Tags reads in ALIGN_BAM and phases variants in VARIANT_VCF.\n");
fprintf(stderr, "\nRequired arguments:\n");
fprintf(stderr, " ALIGN_BAM is the alignment of reads to the reference.\n");
fprintf(stderr, " REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n");
fprintf(stderr, " VARIANT_VCF is the set of variants to use for phasing.\n");
fprintf(stderr, " PARAMS is the file with margin parameters.\n");
fprintf(stderr, "\nDefault options:\n");
fprintf(stderr, " -h --help : Print this help screen\n");
fprintf(stderr, " -a --logLevel : Set the log level [default = info]\n");
# ifdef _OPENMP
fprintf(stderr, " -t --threads : Set number of concurrent threads [default = 1]\n");
#endif
fprintf(stderr, " -o --outputBase : Name to use for output files [default = 'output']\n");
fprintf(stderr, " -r --region : If set, will only compute for given chromosomal region\n");
fprintf(stderr, " Format: chr:start_pos-end_pos (chr3:2000-3000)\n");
fprintf(stderr, " -p --depth : Will override the downsampling depth set in PARAMS\n");
fprintf(stderr, " -k --tempFilesToDisk : Write temporary files to disk (for --diploid or supplementary output)\n");
fprintf(stderr, "\nOutput options:\n");
fprintf(stderr, " -M --skipHaplotypeBAM : Do not write out phased BAM\n");
fprintf(stderr, " -v --phasePrimaryVariantsOnly : Skip step where filtered variants are phased using read haplotypes\n");
fprintf(stderr, " -V --skipPhasedVCF : Do not write out phased VCF\n");
fprintf(stderr, "\n");
}
int phase_main(int argc, char *argv[]) {
// Parameters / arguments
char *logLevelString = stString_copy("critical");
char *bamInFile = NULL;
char *paramsFile = NULL;
char *referenceFastaFile = NULL;
char *outputBase = stString_copy("output");
char *regionStr = NULL;
char *vcfFile = NULL;
int numThreads = 1;
int64_t maxDepth = -1;
bool inMemory = TRUE;
bool shouldOutputHaplotaggedBam = TRUE;
bool shouldOutputPhasedVcf = TRUE;
bool phasePrimaryVariantsOnly = FALSE;
if (argc < 4) {
free(outputBase);
free(logLevelString);
phase_usage();
return 0;
}
bamInFile = stString_copy(argv[1]);
referenceFastaFile = stString_copy(argv[2]);
vcfFile = stString_copy(argv[3]);
paramsFile = stString_copy(argv[4]);
// Parse the options
while (1) {
static struct option long_options[] = {
{ "help", no_argument, 0, 'h' },
{ "logLevel", required_argument, 0, 'a' },
# ifdef _OPENMP
{ "threads", required_argument, 0, 't'},
#endif
{ "outputBase", required_argument, 0, 'o'},
{ "region", required_argument, 0, 'r'},
{ "depth", required_argument, 0, 'p'},
{ "tempFilesToDisk", no_argument, 0, 'k'},
{ "skipHaplotypeBAM", no_argument, 0, 'M'},
{ "phasePrimaryVariantsOnly", no_argument, 0, 'v'},
{ "skipPhasedVCF", no_argument, 0, 'V'},
{ 0, 0, 0, 0 } };
int option_index = 0;
int key = getopt_long(argc-2, &argv[2], "ha:o:p:t:r:kMvV", long_options, &option_index);
if (key == -1) {
break;
}
switch (key) {
case 'a':
free(logLevelString);
logLevelString = stString_copy(optarg);
break;
case 'h':
phase_usage();
return 0;
case 'o':
free(outputBase);
outputBase = getFileBase(optarg, "output");
break;
case 'r':
regionStr = stString_copy(optarg);
break;
case 'p':
maxDepth = atoi(optarg);
if (maxDepth < 0) {
st_errAbort("Invalid maxDepth: %s", optarg);
}
break;
case 't':
numThreads = atoi(optarg);
if (numThreads <= 0) {
st_errAbort("Invalid thread count: %d", numThreads);
}
break;
case 'k':
inMemory = FALSE;
break;
case 'M':
shouldOutputHaplotaggedBam = FALSE;
break;
case 'V':
shouldOutputPhasedVcf = FALSE;
phasePrimaryVariantsOnly = TRUE;
break;
case 'v':
phasePrimaryVariantsOnly = TRUE;
break;
default:
phase_usage();
free(outputBase);
free(logLevelString);
free(bamInFile);
free(referenceFastaFile);
free(paramsFile);
return 0;
}
}
// sanity check (conflicting params)
if (!shouldOutputHaplotaggedBam && !shouldOutputPhasedVcf) {
st_errAbort("With --skipHaplotypeBAM and --skipPhasedVCF there will be no output.\n");
}
// sanity check (verify files exist)
if (access(bamInFile, R_OK) != 0) {
st_errAbort("Could not read from input bam file: %s\n", bamInFile);
char *idx = stString_print("%s.bai", bamInFile);
if (access(idx, R_OK) != 0) {
st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile);
}
free(idx);
}
if (access(referenceFastaFile, R_OK) != 0) {
st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile);
}
if (access(vcfFile, R_OK) != 0) {
st_errAbort("Could not read from vcf file: %s\n", vcfFile);
}
if (access(paramsFile, R_OK) != 0) {
st_errAbort("Could not read from params file: %s\n", paramsFile);
}
// Initialization from arguments
time_t startTime = time(NULL);
st_setLogLevelFromString(logLevelString);
free(logLevelString);
if (st_getLogLevel() >= info) {
st_setCallocDebug(true);
}
# ifdef _OPENMP
if (numThreads <= 0) {
numThreads = 1;
}
omp_set_num_threads(numThreads);
st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads());
# endif
// Parse parameters
st_logCritical("> Parsing model parameters from file: %s\n", paramsFile);
Params *params = params_readParams(paramsFile);
// update depth (if set)
if (maxDepth >= 0) {
st_logCritical("> Changing maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth,
maxDepth);
params->polishParams->maxDepth = (uint64_t) maxDepth;
}
// Print a report of the parsed parameters
if (st_getLogLevel() == debug) {
params_printParameters(params, stderr);
}
// get vcf entries (if set)
stHash *vcfEntries = NULL;
if (vcfFile != NULL) {
vcfEntries = parseVcf2(vcfFile, regionStr, params);
}
// get valid contigs (to help bam chunker construction)
stList *vcfContigsTmp = stHash_getKeys(vcfEntries);
stSet *vcfContigs = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
for (int64_t i = 0; i < stList_length(vcfContigsTmp); i++) {
stSet_insert(vcfContigs, stList_get(vcfContigsTmp, i));
}
// get chunker for bam. if regionStr is NULL, it will be ignored
time_t chunkingStart = time(NULL);
BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, vcfContigs, params->polishParams, TRUE);
char *regionStrInformative = regionStr != NULL ? stString_copy(regionStr) : stString_join2(",", vcfContigsTmp);
st_logCritical(
"> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n",
time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary,
regionStrInformative, bamChunker->chunkCount);
if (bamChunker->chunkCount == 0) {
st_errAbort("> Found no valid reads!\n");
}
free(regionStrInformative);
stList_destruct(vcfContigsTmp);
stSet_destruct(vcfContigs);
// print chunk info
char *outputChunksFile = stString_print("%s.chunks.csv", outputBase);
FILE *chunksOut = safe_fopen(outputChunksFile, "w");
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
BamChunk *c = stList_get(bamChunker->chunks, i);
fprintf(chunksOut, "%s,%"PRId64",%"PRId64",%"PRId64",%"PRId64"\n", c->refSeqName, c->chunkOverlapStart,
c->chunkOverlapEnd, c->chunkStart, c->chunkEnd);
}
fclose(chunksOut);
free(outputChunksFile);
// output chunker tracks intermediate output files
OutputChunkers *outputChunkers = outputChunkers_construct(numThreads, params, NULL, NULL, NULL, NULL,
".hap1", ".hap2", inMemory);
// (may) need to shuffle chunks
stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct);
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
stList_append(chunkOrder, stIntTuple_construct1(i));
}
if (params->polishParams->shuffleChunks) {
switch (params->polishParams->shuffleChunksMethod) {
case SCM_SIZE_DESC:
st_logCritical("> Ordering chunks by estimated depth\n");
stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks);
stList_reverse(chunkOrder);
break;
case SCM_RANDOM:
st_logCritical("> Randomly shuffling chunks\n");
stList_shuffle(chunkOrder);
break;
}
}
// multiproccess the chunks, save to results
st_logCritical("> Setup complete, beginning run\n");
int64_t lastReportedPercentage = 0;
time_t polishStartTime = time(NULL);
# ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
# endif
for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0);
// Time all chunks
time_t chunkStartTime = time(NULL);
// Get chunk
BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx);
// logging
char *logIdentifier;
bool logProgress = FALSE;
int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount);
# ifdef _OPENMP
int64_t threadIdx = omp_get_thread_num();
logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx);
if (threadIdx == 0) {
if (currentPercentage != lastReportedPercentage) {
logProgress = TRUE;
lastReportedPercentage = currentPercentage;
}
}
# else
int64_t threadIdx = 0;
logIdentifier = stString_copy("");
if (currentPercentage != lastReportedPercentage) {
logProgress = TRUE;
lastReportedPercentage = currentPercentage;
}
# endif
// prints percentage complete and estimated time remaining
if (logProgress) {
// log progress
int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime);
int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage));
char *timeElapsedDescriptor = getTimeDescriptorFromSeconds(timeTaken);
char *timeLeftDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ?
stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining));
st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64", %s). Estimated time remaining: %s\n",
currentPercentage, i, bamChunker->chunkCount, timeElapsedDescriptor, timeLeftDescriptor);
free(timeLeftDescriptor);
free(timeElapsedDescriptor);
}
// Get reference string for chunk of alignment
char *chunkReference = getSequenceFromReference(referenceFastaFile, bamChunk->refSeqName,
bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n",
logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
// get VCF string
stList *chunkVcfEntries = stList_construct3(0, (void(*)(void*))vcfEntry_destruct);
stList *filteredChunkVcfEntries = stList_construct3(0, (void(*)(void*))vcfEntry_destruct);
getVcfEntriesForRegion(vcfEntries, chunkVcfEntries, filteredChunkVcfEntries, NULL,
bamChunk->refSeqName, bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd, params);
// get alleles and read substrings for all vcf entries and a unified set of bcrs:
// we do main phasing with some and then phase the filtered var with same reads
// update vcf alleles
updateVcfEntriesWithSubstringsAndPositions(chunkVcfEntries, chunkReference, strlen(chunkReference),
FALSE, params);
if (!phasePrimaryVariantsOnly) {
updateVcfEntriesWithSubstringsAndPositions(filteredChunkVcfEntries, chunkReference, strlen(chunkReference),
FALSE, params);
}
// Convert bam lines into corresponding reads and alignments
st_logInfo(" %s Parsing input reads from file: %s\n", logIdentifier, bamInFile);
stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *readsForFilteredVcfEntries = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
stList *filteredReadsForFilteredVcfEntries = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
extractReadSubstringsAtVariantPositions(bamChunk, chunkVcfEntries, reads, filteredReads, params);
if (!phasePrimaryVariantsOnly) {
extractReadSubstringsAtVariantPositions(bamChunk, filteredChunkVcfEntries, readsForFilteredVcfEntries,
filteredReadsForFilteredVcfEntries, params);
}
// do downsampling if appropriate
if (params->polishParams->maxDepth > 0) {
// get downsampling structures
stList *maintainedReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
bool didDownsample = downsampleBamChunkReadWithVcfEntrySubstringsViaFullReadLengthLikelihood(
params->polishParams->maxDepth, chunkVcfEntries, reads, maintainedReads, filteredReads);
// we need to destroy the discarded reads and structures
if (didDownsample) {
st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier,
stList_length(reads), stList_length(maintainedReads));
// still has all the old reads, need to not free these
stList_setDestructor(reads, NULL);
stList_destruct(reads);
// and keep the filtered reads
reads = maintainedReads;
}
// no downsampling, we just need to free the (empty) objects
else {
assert(stList_length(maintainedReads) == 0);
stList_destruct(maintainedReads);
}
}
time_t primaryPhasingStart = time(NULL);
// iteratively find bubbles
BubbleGraph *bg = NULL;
stHash *readsToPSeqs = NULL;
stSet *readsBelongingToHap1 = NULL, *readsBelongingToHap2 = NULL;
stGenomeFragment *gf = NULL;
stReference *ref = NULL;
stList *vcfEntriesToBubbles = NULL;
// Get the bubble graph representation
bg = bubbleGraph_constructFromVCFAndBamChunkReadVcfEntrySubstrings(reads, chunkVcfEntries, params,
&vcfEntriesToBubbles);
// Now make a POA for each of the haplotypes and phase
ref = bubbleGraph_getReference(bg, bamChunk->refSeqName, params);
gf = bubbleGraph_phaseBubbleGraph(bg, ref, reads, params, &readsToPSeqs);
stGenomeFragment_phaseBamChunkReads(gf, readsToPSeqs, reads, &readsBelongingToHap1, &readsBelongingToHap2,
params->phaseParams);
st_logInfo(" %s After phasing, of %i reads got %i reads partitioned into hap1 and %i reads partitioned "
"into hap2 (%i unphased)\n", logIdentifier, (int) stList_length(reads),
(int) stSet_size(readsBelongingToHap1), (int) stSet_size(readsBelongingToHap2),
(int) (stList_length(reads) - stSet_size(readsBelongingToHap1) -
stSet_size(readsBelongingToHap2)));
st_logInfo(" %s Phased primary reads in %d sec\n", logIdentifier, time(NULL) - primaryPhasingStart);
// phase filtered variants (if we're generating a VCF)
if (!phasePrimaryVariantsOnly) {
bubbleGraph_phaseVcfEntriesFromHaplotaggedReads(readsForFilteredVcfEntries, filteredChunkVcfEntries,
readsBelongingToHap1, readsBelongingToHap2, bamChunk,
bamChunker->readEnumerator, params);
}
// assign filtered reads to haplotypes
for (int64_t bcrIdx = 0; bcrIdx < stList_length(reads); bcrIdx++) {
BamChunkRead *bcr = stList_get(reads, bcrIdx);
if (!stSet_search(readsBelongingToHap1, bcr) && !stSet_search(readsBelongingToHap2, bcr)) {
// was filtered in some form
stList_append(filteredReads, bamChunkRead_constructCopy(bcr));
}
}
st_logInfo(" %s Assigning %"PRId64" filtered reads to haplotypes\n", logIdentifier,
stList_length(filteredReads));
time_t filteredPhasingStart = time(NULL);
bubbleGraph_partitionFilteredReadsFromVcfEntries(filteredReads, gf, bg, vcfEntriesToBubbles,
readsBelongingToHap1,
readsBelongingToHap2, params, logIdentifier);
st_logInfo(" %s Partitioned filtered reads in %d sec.\n", logIdentifier, time(NULL) - filteredPhasingStart);
// Output
outputChunkers_processChunkSequencePhased(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName,
NULL, NULL, reads, readsBelongingToHap1, readsBelongingToHap2, gf,
params);
// save
// only use primary reads (not filteredReads) to track read phasing
updateOriginalVcfEntriesWithBubbleData(bamChunk, reads, bamChunker->readEnumerator, gf, bg,
vcfEntriesToBubbles, readsBelongingToHap1, readsBelongingToHap2, logIdentifier);
// Cleanup
stList_destruct(chunkVcfEntries);
stList_destruct(filteredChunkVcfEntries);
stSet_destruct(readsBelongingToHap1);
stSet_destruct(readsBelongingToHap2);
bubbleGraph_destruct(bg);
stGenomeFragment_destruct(gf);
stReference_destruct(ref);
stHash_destruct(readsToPSeqs);
stList_destruct(vcfEntriesToBubbles);
free(chunkReference);
// report timing
if (st_getLogLevel() >= info) {
st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n",
logIdentifier, stList_length(reads) + stList_length(filteredReads), (int) (time(NULL) - chunkStartTime));
}
// final post-completion logging cleanup
stList_destruct(reads);
stList_destruct(filteredReads);
stList_destruct(readsForFilteredVcfEntries);
stList_destruct(filteredReadsForFilteredVcfEntries);
free(logIdentifier);
}
// for writing haplotyped chunks
stList *allReadIdsHap1 = stList_construct3(0, free);
stList *allReadIdsHap2 = stList_construct3(0, free);
// for writing vcf
bool *chunkWasSwitched = st_calloc(bamChunker->chunkCount, sizeof(bool));
// merge chunks
time_t mergeStartTime = time(NULL);
st_logCritical("> Starting merge\n");
outputChunkers_stitchAndTrackExtraData(outputChunkers, TRUE, bamChunker->chunkCount, allReadIdsHap1, allReadIdsHap2,
chunkWasSwitched);
time_t mergeEndTime = time(NULL);
char *tds = getTimeDescriptorFromSeconds((int) mergeEndTime - mergeStartTime);
st_logCritical(" Merging took %s\n", tds);
outputChunkers_destruct(outputChunkers);
free(tds);
tds = getTimeDescriptorFromSeconds((int) time(NULL) - mergeEndTime);
st_logInfo("> Merge cleanup took %s\n", tds);
free(tds);
// maybe write final haplotyped bams
if (shouldOutputHaplotaggedBam) {
// logging
time_t hapBamStart = time(NULL);
// get all reads
stSet *allReadIdsForHaplotypingHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
stSet *allReadIdsForHaplotypingHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
for (int64_t i = 0; i < stList_length(allReadIdsHap1); i++) {
stSet_insert(allReadIdsForHaplotypingHap1, stList_get(allReadIdsHap1, i));
}
for (int64_t i = 0; i < stList_length(allReadIdsHap2); i++) {
stSet_insert(allReadIdsForHaplotypingHap2, stList_get(allReadIdsHap2, i));
}
// write it
writeHaplotaggedBam(bamChunker->bamFile, outputBase, regionStr,
allReadIdsForHaplotypingHap1, allReadIdsForHaplotypingHap2, NULL, params, "");
// loggit
char *hapBamTDS = getTimeDescriptorFromSeconds(time(NULL) - hapBamStart);
st_logCritical("> Wrote haplotagged BAM in %s\n", hapBamTDS);
// cleanup
free(hapBamTDS);
stSet_destruct(allReadIdsForHaplotypingHap1);
stSet_destruct(allReadIdsForHaplotypingHap2);
}
// maybe write VCF
if (shouldOutputPhasedVcf) {
// loggit
time_t vcfWriteStart = time(NULL);
char *outputVcfFile = stString_print("%s.phased.vcf", outputBase);
char *outputPhaseSetFile = stString_print("%s.phaseset.bed", outputBase);
st_logCritical("> Writing phased VCF to %s, phaseset info to %s\n", outputVcfFile, outputPhaseSetFile);
// write it
updateHaplotypeSwitchingInVcfEntries(bamChunker, chunkWasSwitched, vcfEntries);
writePhasedVcf(vcfFile, regionStr, outputVcfFile, outputPhaseSetFile, vcfEntries, params);
// loggit
char *phasedVcfTDS = getTimeDescriptorFromSeconds(time(NULL) - vcfWriteStart);
st_logCritical("> Wrote phased VCF in %s\n", phasedVcfTDS);
// cleanup
free(phasedVcfTDS);
free(outputVcfFile);
free(outputPhaseSetFile);
}
// cleanup
free(chunkWasSwitched);
bamChunker_destruct(bamChunker);
params_destruct(params);
if (regionStr != NULL) free(regionStr);
stList_destruct(chunkOrder);
free(vcfFile);
stHash_destruct(vcfEntries);
if (allReadIdsHap1 != NULL) stList_destruct(allReadIdsHap1);
if (allReadIdsHap2 != NULL) stList_destruct(allReadIdsHap2);
free(outputBase);
free(bamInFile);
free(referenceFastaFile);
free(paramsFile);
// log completion
char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime);
st_logCritical("> Finished phasing in %s.\n", timeDescriptor);
free(timeDescriptor);
// while(1); // Use this for testing for memory leaks
return 0;
}
|
naive_parallel.c | #include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<omp.h>
#define NUM_THREADS 4
int count=0;
void search(char *t,int start,int end,char *p)
{
int i,j;
int n=end-start+1;
int m=strlen(p);
for(i=start;i<=end-m;i++)
{
for(j=0;j<m;j++)
if(t[i+j]!=p[j])
break;
if(j==m){
printf("pattern found at index %d\n",i);
count++;
}
}
return;
}
int main()
{
char pat[10];
char *text;
int n,m,i=0;
size_t size = 0;
FILE *fp = fopen("gene.txt", "r");
fseek(fp, 0, SEEK_END);
size = ftell(fp);
rewind(fp);
text = malloc((size + 1) * sizeof(*text));
fread(text, size, 1, fp);
text[size] = '\0';
scanf("%s",pat);
int lenp=strlen(pat);
int bs=strlen(text)/NUM_THREADS;
int rem=strlen(text)%NUM_THREADS;
int tid,start,end;
#pragma omp parallel num_threads(NUM_THREADS) private(tid,start,end) shared(text,pat,rem,bs,m)
{
tid=omp_get_thread_num();
if(tid==0)
{
#pragma omp critical (part1)
{
start=tid;
end=bs-1;
search(text,start,end,pat);
}
}
else
{
#pragma omp critical (part2)
{
start=(tid*bs)-lenp;
end=(tid*bs)+bs-1;
search(text,start,end,pat);
}
}
}
if(rem!=0)
search(text,(NUM_THREADS+1)*bs,strlen(text),pat);
printf("Total number of matches = %d\n",count );
return 0;
}
|
depth-metrics.h | // License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
//
// Plane Fit implementation follows http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points algorithm
#pragma once
#include <vector>
#include <mutex>
#include <array>
#include <imgui.h>
#include <librealsense2/rsutil.h>
#include <librealsense2/rs.hpp>
#include "rendering.h"
namespace rs2
{
namespace depth_quality
{
struct snapshot_metrics
{
int width;
int height;
rs2::region_of_interest roi;
float distance;
float angle;
float angle_x;
float angle_y;
plane p;
std::array<float3, 4> plane_corners;
};
struct single_metric_data
{
single_metric_data(std::string name, float val) :
val(val), name(name) {}
float val;
std::string name;
};
using callback_type = std::function<void(
const std::vector<rs2::float3>& points,
const plane p,
const rs2::region_of_interest roi,
const float baseline_mm,
const float focal_length_pixels,
const int ground_thruth_mm,
const bool plane_fit,
const float plane_fit_to_ground_truth_mm,
bool record,
std::vector<single_metric_data>& samples)>;
inline plane plane_from_point_and_normal(const rs2::float3& point, const rs2::float3& normal)
{
return{ normal.x, normal.y, normal.z, -(normal.x*point.x + normal.y*point.y + normal.z*point.z) };
}
inline plane plane_from_points(const std::vector<rs2::float3> points)
{
if (points.size() < 3) throw std::runtime_error("Not enough points to calculate plane");
rs2::float3 sum = { 0,0,0 };
for (auto point : points) sum = sum + point;
rs2::float3 centroid = sum / float(points.size());
double xx = 0, xy = 0, xz = 0, yy = 0, yz = 0, zz = 0;
for (auto point : points) {
rs2::float3 temp = point - centroid;
xx += temp.x * temp.x;
xy += temp.x * temp.y;
xz += temp.x * temp.z;
yy += temp.y * temp.y;
yz += temp.y * temp.z;
zz += temp.z * temp.z;
}
double det_x = yy*zz - yz*yz;
double det_y = xx*zz - xz*xz;
double det_z = xx*yy - xy*xy;
double det_max = std::max({ det_x, det_y, det_z });
if (det_max <= 0) return{ 0, 0, 0, 0 };
rs2::float3 dir{};
if (det_max == det_x)
{
float a = static_cast<float>((xz*yz - xy*zz) / det_x);
float b = static_cast<float>((xy*yz - xz*yy) / det_x);
dir = { 1, a, b };
}
else if (det_max == det_y)
{
float a = static_cast<float>((yz*xz - xy*zz) / det_y);
float b = static_cast<float>((xy*xz - yz*xx) / det_y);
dir = { a, 1, b };
}
else
{
float a = static_cast<float>((yz*xy - xz*yy) / det_z);
float b = static_cast<float>((xz*xy - yz*xx) / det_z);
dir = { a, b, 1 };
}
return plane_from_point_and_normal(centroid, dir.normalize());
}
inline double evaluate_pixel(const plane& p, const rs2_intrinsics* intrin, float x, float y, float distance, float3& output)
{
float pixel[2] = { x, y };
rs2_deproject_pixel_to_point(&output.x, intrin, pixel, distance);
return evaluate_plane(p, output);
}
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y, float min, float max)
{
float3 point;
auto far = evaluate_pixel(p, intrin, x, y, max, point);
if (fabs(max - min) < 1e-3) return point;
auto near = evaluate_pixel(p, intrin, x, y, min, point);
if (far*near > 0) return{ 0, 0, 0 };
auto avg = (max + min) / 2;
auto mid = evaluate_pixel(p, intrin, x, y, avg, point);
if (mid*near < 0) return approximate_intersection(p, intrin, x, y, min, avg);
return approximate_intersection(p, intrin, x, y, avg, max);
}
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y)
{
return approximate_intersection(p, intrin, x, y, 0.f, 1000.f);
}
inline snapshot_metrics analyze_depth_image(
const rs2::video_frame& frame,
float units, float baseline_mm,
const rs2_intrinsics * intrin,
rs2::region_of_interest roi,
const int ground_truth_mm,
bool plane_fit_present,
std::vector<single_metric_data>& samples,
bool record,
callback_type callback)
{
auto pixels = (const uint16_t*)frame.get_data();
const auto w = frame.get_width();
const auto h = frame.get_height();
snapshot_metrics result{ w, h, roi, {} };
std::mutex m;
std::vector<rs2::float3> roi_pixels;
//#pragma omp parallel for - TODO optimization envisaged
for (int y = roi.min_y; y < roi.max_y; ++y)
for (int x = roi.min_x; x < roi.max_x; ++x)
{
auto depth_raw = pixels[y*w + x];
if (depth_raw)
{
// units is float
float pixel[2] = { float(x), float(y) };
float point[3];
auto distance = depth_raw * units;
rs2_deproject_pixel_to_point(point, intrin, pixel, distance);
std::lock_guard<std::mutex> lock(m);
roi_pixels.push_back({ point[0], point[1], point[2] });
}
}
if (roi_pixels.size() < 3) { // Not enough pixels in RoI to fit a plane
return result;
}
plane p = plane_from_points(roi_pixels);
if (p == plane{ 0, 0, 0, 0 }) { // The points in RoI don't span a valid plane
return result;
}
// Calculate intersection point of the camera's optical axis with the plane fit in camera's CS
float3 plane_fit_pivot = approximate_intersection(p, intrin, intrin->ppx, intrin->ppy);
// Find the distance between the "rectified" fit and the ground truth planes.
float plane_fit_to_gt_dist_mm = (ground_truth_mm > 0.f) ? (plane_fit_pivot.z * 1000 - ground_truth_mm): 0;
callback(roi_pixels, p, roi, baseline_mm, intrin->fx, ground_truth_mm, plane_fit_present, plane_fit_to_gt_dist_mm, record, samples);
result.p = p;
result.plane_corners[0] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.min_y));
result.plane_corners[1] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.min_y));
result.plane_corners[2] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.max_y));
result.plane_corners[3] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.max_y));
// Distance of origin (the camera) to the plane is the distance to the intersection point
result.distance = is_valid(result.plane_corners) ? plane_fit_pivot.length()*1000 : -1;
// Angle can be calculated from param C
result.angle = static_cast<float>(std::acos(std::abs(p.c)) / M_PI * 180.);
// Calculate normal
auto n = float3{ p.a, p.b, p.c };
auto cam = float3{ 0.f, 0.f, -1.f };
auto dot = n * cam;
auto u = cam - n * dot;
result.angle_x = u.x;
result.angle_y = u.y;
return result;
}
}
}
|
mcrat.c | /*
# Program to run a Monte Carlo radiation transfer through the 2D
# simulations of GRB jets.
#
# Python code written by D. Lazzati at Oregonstate, C code written by Tyler Parsotan @ Oregon State
# ver 0.1 July 8, 2015
# ver 1.1 July 20, 2015: added record of number of scatterings, included
# all terms in weight. Should now give correct light curves.
# ver 1.2 July 21, 2015: added parameter file to keep track of input
# params of each simulation
# ver 2.0 July 22, 2015: corrected the problem that arises when there is
# no scattering in the time span of one frame. Fixed output arrays dimension.
# ver 2.1 July 25, 2015: fixed bug that did not make the number of
# scattering grow with the number of photons.
# ver 3.0 July 28, 2015: using scipy nearest neighbor interpolation to
# speed things up. Gained about factor 2
# ver 3.1 July 29, 2015: added radial spread of photon injection points
# ver 3.2 July 31, 2015: added Gamma to the weight of photons!!!
# ver 4.0 Aug 5, 2015: try to speed up by inverting cycle
# ver 4.1 Aug 8, 2015: add spherical test as an option
# ver 4.2 Aug 9, 2015: saving files appending rather than re-writing
# ver 4.3 Aug 11, 2015: corrected error in the calculation of the local temperature
# ver 4.4 Aug 13, 2015: added cylindrical test
# ver 4.5 Aug 18, 2015: fixd various problems pointed by the cylindrical test
# ver 4.6 Aug 21, 2015: corrected mean free path for large radii
# ver 5.0 Aug 25, 2015: corrected problem with high-T electrons and excess scatterings
# ver 5.1 Aug 25, 2015: cleaned-up coding
# ver 5.2 Sept 3, 2015: fixed problem with number of scatterings for multiple injections
*
* ver 6.0 Dec 28, 2016: rewrote the code in C, added checkpoint file so if the code is interrupted all the progress wont be lost, made the code only need to be compiled once for a given MC_XXX directory path
so you just need to supply the sub directory of MC_XXX as a command line argument
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <dirent.h>
#include <math.h>
#include <gsl/gsl_rng.h>
//#include "mclib.h"
#include "mclib_3d.h"
#include <omp.h>
#define THISRUN "Science"
#define FILEPATH "/Users/Tylerparsotan/Documents/Box\ Sync/RIKEN_HYDRO_DATA/JP_HYDRODATA/"
#define FILEROOT "u"
#define MC_PATH "PHOTON_TEST/"
//#define MC_PATH "MC_16OI/Single_Photon_Cy_mc_total/"
#define MCPAR "riken_mc.par"
int main(int argc, char **argv)
{
//compile each time a macro is changed
// Define variables
char flash_prefix[200]="";
char mc_file[200]="" ;
char this_run[200]=THISRUN;
char *cyl="Cylindrical";
char *sph="Spherical";
char spect;//type of spectrum
char restrt;//restart or not
double fps, theta_jmin, theta_jmax ;//frames per second of sim, min opening angle of jet, max opening angle of jet in radians
double inj_radius_small, inj_radius_large, ph_weight_suggest ;//radius at chich photons are injected into sim
int frm0_small, frm0_large,last_frm, frm2_small, frm2_large, j=0, min_photons, max_photons ;//frame starting from, last frame of sim, frame of last injection
int num_thread=0, angle_count=0;
int half_threads=floor((num_thread/2));
int num_angles=0;
int dim_switch=0;
double *thread_theta=NULL; //saves ranges of thetas for each thread to go through
double delta_theta=0;
//new muliple threads injecting and propagating photons
const gsl_rng_type *rng_t;
gsl_rng **rng;
gsl_rng_env_setup();
rng_t = gsl_rng_ranlxs0;
//want to break up simulation by angle and injection frame & have each thread save data in its own folder
//have each thread check if its directory is made and if its restarting (delete evrything) or if its continuing with a previous simulation
//the angle and the injection frames will be the names of mc_dir, therefore read mc.par first in MC_XXX directory
//make strings of proper directories etc.
snprintf(flash_prefix,sizeof(flash_prefix),"%s%s",FILEPATH,FILEROOT );
//snprintf(mc_dir,sizeof(flash_prefix),"%s%s",FILEPATH,MC_PATH);
//snprintf(mc_file,sizeof(flash_prefix),"%s%s",mc_dir, MCPAR);
snprintf(mc_file,sizeof(flash_prefix),"%s%s%s",FILEPATH, MC_PATH,MCPAR);
//printf(">> mc.py: Reading mc.par\n");
readMcPar(mc_file, &fps, &theta_jmin, &theta_jmax, &delta_theta, &inj_radius_small,&inj_radius_large, &frm0_small , &frm0_large ,&last_frm ,&frm2_small, &frm2_large, &ph_weight_suggest, &min_photons, &max_photons, &spect, &restrt, &num_thread, &dim_switch); //thetas that comes out is in degrees
//printf("small: %d large: %d weights: %e, %c\n", min_photons, max_photons, ph_weight_suggest, restrt);
if (num_thread==0)
{
num_thread=omp_get_max_threads(); //if user specifies 0 in mc.par, default to max number of threads possible
}
half_threads=floor((num_thread/2));
//thread_theta=malloc(((half_threads)+1)*sizeof(double) );
rng = (gsl_rng **) malloc((num_thread ) * sizeof(gsl_rng *));
rng[0] = gsl_rng_alloc (rng_t); //initalize first random number generator to seed the others with random numbers
for(j=1;j<num_thread;j++)
{
rng[j] = gsl_rng_alloc (rng_t);
gsl_rng_set(rng[j],gsl_rng_get(rng[0]));
}
//divide up angles and frame injections among threads DONT WANT NUMBER OF THREADS TO BE ODD
//assign ranges to array that hold them
/*
delta_theta=(theta_jmax-theta_jmin)/(half_threads);
*(thread_theta+0)=theta_jmin;
//printf("%e\n", *(thread_theta+0));
for (j=1;j<(half_threads+1); j++)
{
*(thread_theta+j)=*(thread_theta+(j-1))+delta_theta;
//printf("%e\n", *(thread_theta+j)*180/M_PI);
}
*/
//leave angles in degrees here
num_angles=(int) (((theta_jmax-theta_jmin)/delta_theta)) ;//*(180/M_PI));
thread_theta=malloc( num_angles *sizeof(double) );
*(thread_theta+0)=theta_jmin;//*(180/M_PI);
printf("%e\n", *(thread_theta+0));
for (j=1;j<(num_angles); j++)
{
*(thread_theta+j)=*(thread_theta+(j-1))+delta_theta;
printf("%e\n", *(thread_theta+j));
}
//printf("half_threads: %d\n", half_threads);
//start parallel section with half the threads possible and assign each thread to its value of theta by rewriting its private value of theta min and max that was read from mcrat
omp_set_nested(1); //allow for nested parallelization
//omp_set_dynamic(0);
//omp_set_num_threads(num_thread);
//#pragma omp parallel firstprivate(theta_jmin_thread, theta_jmax_thread, mc_dir, phPtr, framestart, scatt_framestart, num_ph, restrt, time_now, st, dirp, file_count, mc_filename, mc_operation, dt_max)
//printf("%d\n", omp_get_num_threads() );
// for (angle_count=(int) (theta_jmin); angle_count< (int) (theta_jmax+1) ;angle_count=angle_count+delta_theta )
#pragma omp parallel for num_threads(num_thread) private(angle_count)
for (angle_count=0; angle_count< num_angles ;angle_count++ )
{
printf("%d\t%lf\n", omp_get_thread_num(), delta_theta );
double inj_radius;
int frm2, frm0;
char mc_filename[200]="";
char mc_filename_2[200]="";
char mc_operation[200]="";
char mc_dir[200]="" ;
int file_count = 0;
DIR * dirp;
struct dirent * entry;
struct stat st = {0};
double theta_jmin_thread=0, theta_jmax_thread=0;
theta_jmin_thread= (*(thread_theta+angle_count))*(M_PI/180);//(*(thread_theta+omp_get_thread_num() ));
theta_jmax_thread= ((*(thread_theta+angle_count))+delta_theta)*(M_PI/180);//(*(thread_theta+omp_get_thread_num()+1 ));
printf("Thread %d: %0.1lf, %0.1lf \n %d %d\n", omp_get_thread_num(), theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm2_small, frm2_large );
snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this
printf(">> Thread %d in MCRaT: I am working on path: %s \n",omp_get_thread_num(), mc_dir );
if ((theta_jmin_thread >= 0) && (theta_jmax_thread <= (2*M_PI/180) )) //if within small angle (0-2 degrees) use _small inj_radius and frm2
{
inj_radius=inj_radius_small;
frm2=frm2_small;
frm0=frm0_small;
}
else
{
inj_radius=inj_radius_large;
frm2=frm2_large;
frm0=frm0_large;
}
printf("Thread %d: %0.1lf, %0.1lf \n %d %e %d\n", omp_get_thread_num(), theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm2, inj_radius, frm0 );
//want to also have another set of threads that each has differing ranges of frame injections therefore do nested parallelism here so each thread can read its own checkpoint file
//#pragma omp parallel num_threads(2) firstprivate(restrt)
{
char flash_file[200]="";
char log_file[200]="";
FILE *fPtr=NULL; //pointer to log file for each thread
double *xPtr=NULL, *yPtr=NULL, *rPtr=NULL, *thetaPtr=NULL, *velxPtr=NULL, *velyPtr=NULL, *densPtr=NULL, *presPtr=NULL, *gammaPtr=NULL, *dens_labPtr=NULL;
double *phiPtr=NULL, *velzPtr=NULL, *zPtr=NULL;
double *szxPtr=NULL,*szyPtr=NULL, *tempPtr=NULL; //pointers to hold data from FLASH files
int num_ph=0, array_num=0, ph_scatt_index=0, max_scatt=0, min_scatt=0,i=0; //number of photons produced in injection algorithm, number of array elleemnts from reading FLASH file, index of photon whch does scattering, generic counter
double dt_max=0, thescatt=0, accum_time=0;
double gamma_infinity=0, time_now=0, time_step=0, avg_scatt=0; //gamma_infinity not used?
double ph_dens_labPtr=0, ph_vxPtr=0, ph_vyPtr=0, ph_tempPtr=0, ph_vzPtr=0;// *ph_cosanglePtr=NULL ;
double min_r=0, max_r=0;
int frame=0, scatt_frame=0, frame_scatt_cnt=0, scatt_framestart=0, framestart=0;
struct photon *phPtr=NULL; //pointer to array of photons
//if (omp_get_thread_num()==0)
//{
//printf( "A:%d Im Thread: %d with ancestor %d working in %s\n", omp_get_num_threads(), omp_get_thread_num(), omp_get_ancestor_thread_num(1), mc_dir);
//}
if (restrt=='c')
{
printf(">> mc.py: Reading checkpoint\n");
//#pragma omp critical
{
readCheckpoint(mc_dir, &phPtr, frm0, &framestart, &scatt_framestart, &num_ph, &restrt, &time_now);
/*
for (i=0;i<num_ph;i++)
{
printf("%e,%e,%e, %e,%e,%e, %e, %e\n",(phPtr+i)->p0, (phPtr+i)->p1, (phPtr+i)->p2, (phPtr+i)->p3, (phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2, (phPtr+i)->num_scatt );
}
*/
if (restrt=='c')
{
printf(">> Thread %d with ancestor %d: Starting from photons injected at frame: %d out of %d\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1),framestart, frm2);
printf(">> Thread %d with ancestor %d: Continuing scattering %d photons from frame: %d\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1),num_ph, scatt_framestart);
printf(">> Thread %d with ancestor %d: The time now is: %e\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1),time_now);
}
else
{
printf(">> Thread %d with ancestor %d: Continuing simulation by injecting photons at frame: %d out of %d\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1),framestart, frm2); //starting with new photon injection is same as restarting sim
}
}
}
else if (stat(mc_dir, &st) == -1)
{
mkdir(mc_dir, 0777); //make the directory with full permissions
framestart=frm0; //if restarting then start from parameters given in mc.par file
scatt_framestart=frm0;
}
else
{
//remove everything from MC directory to ensure no corruption of data if theres other files there besides the mc.par file
//for a checkpoint implementation, need to find the latest checkpoint file and read it and not delete the files
#pragma omp critical
{
printf(">> Thread %d with ancestor %d: Cleaning directory \n",omp_get_thread_num(), omp_get_ancestor_thread_num(1));
dirp = opendir(mc_dir);
while ((entry = readdir(dirp)) != NULL)
{
if (entry->d_type == DT_REG) { /* If the entry is a regular file */
file_count++; //count how many files are in dorectory
}
}
printf("File count %d\n", file_count);
if (file_count>0)
{
for (i=0;i<=last_frm;i++)
{
snprintf(mc_filename,sizeof(mc_filename),"%s%s%d%s", mc_dir,"mcdata_",i,"_P0.dat");
if(( access( mc_filename, F_OK ) != -1 ) )
{
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s%d%s","exec rm ", mc_dir,"mcdata_",i,"_*.dat"); //prepares string to remove *.dat in mc_dir
printf("%s\n",mc_operation);
system(mc_operation);
//snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s%d%s","exec rm ", mc_dir,"mcdata_",i,"_*");
//system(mc_operation);
}
}
//snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mcdata_PW_*.dat"); //prepares string to remove *.dat in mc_dir
//system(mc_operation);
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mcdata_PW.dat"); //prepares string to remove *.dat in mc_dir
system(mc_operation);
//snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_chkpt_*.dat"); //prepares string to remove *.dat in mc_dir
//system(mc_operation);
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_output_*.log"); //prepares string to remove *.log in mc_dir
system(mc_operation);
}
}
framestart=frm0; //if restarting then start from parameters given in mc.par file
scatt_framestart=frm0;
}
dt_max=1.0/fps;
//#pragma omp barrier
snprintf(log_file,sizeof(log_file),"%s%s",mc_dir,"mc_output.log" );
printf("%s\n",log_file);
fPtr=fopen(log_file, "w");
fprintf(fPtr, "%d Im Thread: %d with ancestor %d Starting on Frame: %d scatt_framestart: %d\n", omp_get_num_threads(), omp_get_thread_num(), omp_get_ancestor_thread_num(1), framestart, scatt_framestart);
fflush(fPtr);
//fclose(fPtr);
//#pragma omp barrier
//exit(0);
//loop over frames
//for a checkpoint implementation, start from the last saved "frame" value and go to the saved "frm2" value
//#pragma omp for
for (frame=framestart;frame<=frm2;frame++)
{
if (restrt=='r')
{
time_now=frame/fps; //for a checkpoint implmentation, load the saved "time_now" value when reading the ckeckpoint file otherwise calculate it normally
}
//printf(">> mc.py: Working on Frame %d\n", frame);
fprintf(fPtr,"%d Im Thread: %d with ancestor %d Working on Frame: %d\n", omp_get_num_threads(), omp_get_thread_num(), omp_get_ancestor_thread_num(1), frame);
fflush(fPtr);
if (restrt=='r')
{
//exit(0);
//read in FLASH file
//for a checkpoint implmentation, dont need to read the file yet
if (dim_switch==0)
{
//put proper number at the end of the flash file
modifyFlashName(flash_file, flash_prefix, frame, dim_switch);
fprintf(fPtr,">> Im Thread: %d with ancestor %d: Opening FLASH file %s\n",omp_get_thread_num(), omp_get_ancestor_thread_num(1), flash_file);
fflush(fPtr);
#pragma omp critical
{
readAndDecimate(flash_file, inj_radius, fps, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fPtr);
}
}
else
{
read_hydro(FILEPATH, frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fps, fPtr);
}
//check for run type
if(strcmp(cyl, this_run)==0)
{
//printf("In cylindrical prep\n");
cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num);
}
else if (strcmp(sph, this_run)==0)
{
sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num );
}
//determine where to place photons and how many should go in a given place
//for a checkpoint implmentation, dont need to inject photons, need to load photons' last saved data
fprintf(fPtr,">> Thread: %d with ancestor %d: Injecting photons\n",omp_get_thread_num(), omp_get_ancestor_thread_num(1));
fflush(fPtr);
if (dim_switch==0)
{
photonInjection(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, szxPtr, szyPtr,rPtr,thetaPtr, tempPtr, velxPtr, velyPtr,rng[omp_get_thread_num()] );
}
else
{
photonInjection3D(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, zPtr, szxPtr, szyPtr,rPtr,thetaPtr, phiPtr, tempPtr, velxPtr, velyPtr, velzPtr, rng[omp_get_thread_num()] );
}
//printf("%d\n",num_ph); //num_ph is one more photon than i actually have
/*
for (i=0;i<num_ph;i++)
printf("%e,%e,%e \n",(phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2 );
*/
}
//scatter photons all the way thoughout the jet
//for a checkpoint implmentation, start from the last saved "scatt_frame" value eh start_frame=frame or start_frame=cont_frame
if (restrt=='r')
{
scatt_framestart=frame; //have to make sure that once the inner loop is done and the outer loop is incrememnted by one the inner loop starts at that new value and not the one read by readCheckpoint()
}
for (scatt_frame=scatt_framestart;scatt_frame<=last_frm;scatt_frame++)
{
fprintf(fPtr,">>\n");
fprintf(fPtr,">> Thread %d with ancestor %d : Working on photons injected at frame: %d out of %d\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1),frame, frm2);
fprintf(fPtr,">> Thread %d with ancestor %d: %s - Working on frame %d\n",omp_get_thread_num(), omp_get_ancestor_thread_num(1), THISRUN, scatt_frame);
fprintf(fPtr,">> Thread %d with ancestor %d: Opening file...\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1));
fflush(fPtr);
if (dim_switch==0)
{
//put proper number at the end of the flash file
modifyFlashName(flash_file, flash_prefix, scatt_frame, dim_switch);
#pragma omp critical
{
phMinMax(phPtr, num_ph, &min_r, &max_r);
readAndDecimate(flash_file, inj_radius, fps, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fPtr);
}
}
else
{
phMinMax(phPtr, num_ph, &min_r, &max_r);
read_hydro(FILEPATH, scatt_frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fps, fPtr);
}
//check for run type
if(strcmp(cyl, this_run)==0)
{
//printf("In cylindrical prep\n");
cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num);
}
else if (strcmp(sph, this_run)==0)
{
sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num );
}
//printf("The result of read and decimate are arrays with %d elements\n", array_num);
fprintf(fPtr,">> Thread %d with ancestor %d: propagating and scattering %d photons\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1),num_ph);
fflush(fPtr);
frame_scatt_cnt=0;
while (time_now<((scatt_frame+1)/fps))
{
//if simulation time is less than the simulation time of the next frame, keep scattering in this frame
//go through each photon and find blocks closest to each photon and properties of those blocks to calulate mean free path
//and choose the photon with the smallest mfp and calculate the timestep
ph_scatt_index=findNearestPropertiesAndMinMFP(phPtr, num_ph, array_num, &time_step, xPtr, yPtr, zPtr, velxPtr, velyPtr, velzPtr, dens_labPtr, tempPtr,\
&ph_dens_labPtr, &ph_vxPtr, &ph_vyPtr, &ph_vzPtr, &ph_tempPtr, rng[omp_get_thread_num()], dim_switch);
//printf("In main: %e, %d, %e, %e\n", *(ph_num_scatt+ph_scatt_index), ph_scatt_index, time_step, time_now);
printf("In main: %e, %d, %e, %e\n",((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now);
if (time_step<dt_max)
{
//update number of scatterings and time
//(*(ph_num_scatt+ph_scatt_index))+=1;
((phPtr+ph_scatt_index)->num_scatt)+=1;
frame_scatt_cnt+=1;
time_now+=time_step;
updatePhotonPosition(phPtr, num_ph, time_step);
//scatter the photon
//printf("Passed Parameters: %e, %e, %e\n", (ph_vxPtr), (ph_vyPtr), (ph_tempPtr));
photonScatter( (phPtr+ph_scatt_index), (ph_vxPtr), (ph_vyPtr), ph_vzPtr, (ph_tempPtr), rng[omp_get_thread_num()] , dim_switch, fPtr);
//if (frame_scatt_cnt%1000 == 0)
{
fprintf(fPtr,"Scattering Number: %d\n", frame_scatt_cnt);
fprintf(fPtr,"The local temp is: %e\n", (ph_tempPtr));
fprintf(fPtr,"Average photon energy is: %e\n", averagePhotonEnergy(phPtr, num_ph)); //write function to average over the photons p0 and then do (*3e10/1.6e-9)
fflush(fPtr);
}
}
else
{
time_now+=dt_max;
//for each photon update its position based on its momentum
updatePhotonPosition(phPtr, num_ph, dt_max);
}
//printf("In main 2: %e, %d, %e, %e\n", ((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now);
}
//get scattering statistics
phScattStats(phPtr, num_ph, &max_scatt, &min_scatt, &avg_scatt);
fprintf(fPtr,"The number of scatterings in this frame is: %d\n", frame_scatt_cnt);
fprintf(fPtr,"The last time step was: %lf.\nThe time now is: %lf\n", time_step,time_now);
fprintf(fPtr,"The maximum number of scatterings for a photon is: %d\nThe minimum number of scattering for a photon is: %d\n", max_scatt, min_scatt);
fprintf(fPtr,"The average number of scatterings thus far is: %lf\n", avg_scatt);
fflush(fPtr);
printPhotons(phPtr, num_ph, scatt_frame , frame, mc_dir);
exit(0);
//for a checkpoint implmentation,save the checkpoint file here after every 5 frames or something
//save the photons data, the scattering number data, the scatt_frame value, and the frame value
//WHAT IF THE PROGRAM STOPS AFTER THE LAST SCATT_FRAME, DURING THE FIRST SCATT_FRAME OF NEW FRAME VARIABLE - save restrt variable as 'r'
fprintf(fPtr, ">> Thread %d with ancestor %d: Making checkpoint file\n", omp_get_thread_num(), omp_get_ancestor_thread_num(1));
fflush(fPtr);
saveCheckpoint(mc_dir, frame, scatt_frame, num_ph, time_now, phPtr, last_frm);
free(xPtr);free(yPtr);free(szxPtr);free(szyPtr);free(rPtr);free(thetaPtr);free(velxPtr);free(velyPtr);free(densPtr);free(presPtr);
free(gammaPtr);free(dens_labPtr);free(tempPtr);
xPtr=NULL; yPtr=NULL; rPtr=NULL;thetaPtr=NULL;velxPtr=NULL;velyPtr=NULL;densPtr=NULL;presPtr=NULL;gammaPtr=NULL;dens_labPtr=NULL;
szxPtr=NULL; szyPtr=NULL; tempPtr=NULL;
}
restrt='r';//set this to make sure that the next iteration of propogating photons doesnt use the values from the last reading of the checkpoint file
free(phPtr);
phPtr=NULL;
}
}//end omp parallel inner section
//merge files from each worker thread within a directory
//dirFileMerge(mc_dir, frm0, last_frm);
} //end omp parallel section
//gsl_rng_free (rand);
//free rand number generator
for (j=0;j<num_thread;j++)
{
gsl_rng_free(rng[j]);
}
free(rng);
free(thread_theta);
return 0;
}
|
SpVec.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SPVEC_H_
#define SRC_SPVEC_H_
#include <string>
#include <algorithm>
#include <vector>
#include "GMDP/vectors/DenseSegment.h"
template <typename SpSegment>
class SpVec {
public:
std::string name;
int nsegments;
int n;
int num_tiles_x;
int global_nrank, global_myrank;
std::vector<int> nodeIds;
std::vector<int> start_id;
std::vector<SpSegment*> segments;
friend boost::serialization::access;
template<class Archive>
void serialize(Archive& ar, const unsigned int version) {
ar & name;
ar & nsegments;
ar & n;
ar & num_tiles_x;
ar & global_nrank;
ar & global_myrank;
ar & nodeIds;
ar & start_id;
ar & segments;
}
SpVec() {};
SpVec(int _n, int _num_tiles_x,
int (*_pfn)(int, int, int)) {
global_nrank = get_global_nrank();
global_myrank = get_global_myrank();
num_tiles_x = _num_tiles_x;
n = _n;
int vx, vy;
int roundup = 256;
nsegments = num_tiles_x;
vx =
((((n + nsegments - 1) / nsegments) + roundup - 1) / roundup) * roundup;
// In case the roundup affected the num tiles
for (int j = 0; j < num_tiles_x; j++) {
nodeIds.push_back(_pfn(j, num_tiles_x, global_nrank));
}
for (int j = 0; j < num_tiles_x; j++) {
start_id.push_back(std::min(vx * j, n));
}
start_id.push_back(n);
// Copy metadata
assert(nsegments > 0);
// Allocate space for tiles
for (int j = 0; j < nsegments; j++) {
segments.push_back(new SpSegment(start_id[j + 1] - start_id[j]));
}
}
~SpVec()
{
for(auto it = segments.begin() ; it != segments.end() ; it++)
{
delete *it;
}
segments.clear();
}
inline int getPartition(int src) const {
for (int i = 0; i < nsegments; i++) {
if ((src > start_id[i]) && (src <= start_id[i + 1])) {
return i;
}
}
return -1;
}
template <typename T>
void get_edges(edgelist_t<T> * blob) const
{
blob->nnz = 0;
blob->m = n;
blob->n = 1;
for(int segment = 0 ; segment < nsegments ; segment++)
{
if(nodeIds[segment] == global_myrank)
{
blob->nnz += segments[segment]->compute_nnz();
}
}
if(blob->nnz > 0)
{
blob->edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)blob->nnz * (uint64_t)sizeof(edge_t<T>), 64));
unsigned int nnzs = 0;
for(int segment = 0 ; segment < nsegments ; segment++)
{
if(nodeIds[segment] == global_myrank)
{
segments[segment]->get_edges(blob->edges + nnzs, start_id[segment]);
nnzs += segments[segment]->compute_nnz();
}
}
}
}
// Note: replace with all-to-all-v
template <typename T>
void ingestEdgelist(edgelist_t<T> blob) {
int nnz_l = blob.nnz;
edge_t<T>* edge_list = blob.edges;
int m = blob.m;
assert(blob.n == 1);
printf("Rank %d: Before shuffle %d edges\n", global_myrank, blob.nnz);
// Done with partitioning
// Now, assign.
int* assignment = new int[nnz_l];
#pragma omp parallel for
for (int i = 0; i < nnz_l; i++) {
int tile = getPartition(edge_list[i].src);
assert(tile != -1);
assignment[i] = nodeIds[tile];
}
// assignment over
MPI_Barrier(MPI_COMM_WORLD);
// pack into messages
// calculate message sizes
int* count = new int[global_nrank];
int* recv_count = new int[global_nrank];
MPI_Request* mpi_req = new MPI_Request[2 * global_nrank];
MPI_Status* mpi_status = new MPI_Status[2 * global_nrank];
memset(count, 0, sizeof(int) * global_nrank);
for (int i = 0; i < nnz_l; i++) {
int r = assignment[i];
count[r]++;
}
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(&count[i], 1, MPI_INT, i, global_myrank, MPI_COMM_WORLD,
&mpi_req[i]);
}
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(&recv_count[i], 1, MPI_INT, i, i, MPI_COMM_WORLD,
&mpi_req[i + global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
MPI_Barrier(MPI_COMM_WORLD);
// pack the messages and send
edge_t<T>** msg = new edge_t<T>* [global_nrank];
int* offsets = new int[global_nrank];
for (int i = 0; i < global_nrank; i++) {
msg[i] = new edge_t<T>[count[i]];
offsets[i] = 0;
}
for (int i = 0; i < nnz_l; i++) {
int r = assignment[i];
msg[r][offsets[r]] = edge_list[i];
++offsets[r];
}
for (int i = 0; i < global_nrank; i++) {
MPI_Isend(msg[i], (uint64_t)sizeof(edge_t<T>) * (uint64_t)count[i],
MPI_CHAR, i, global_myrank, MPI_COMM_WORLD, &mpi_req[i]);
}
// receive messages into final_edge_list
int new_nnz = 0;
int* local_hist = new int[global_nrank + 1];
local_hist[0] = 0;
for (int i = 0; i < global_nrank; i++) {
new_nnz += recv_count[i];
local_hist[i + 1] = local_hist[i] + recv_count[i];
}
edge_t<T>* final_edge_list = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)new_nnz * (uint64_t)sizeof(edge_t<T>), 64));
for (int i = 0; i < global_nrank; i++) {
MPI_Irecv(&final_edge_list[local_hist[i]],
(uint64_t)sizeof(edge_t<T>) * (uint64_t)recv_count[i], MPI_CHAR,
i, i, MPI_COMM_WORLD, &mpi_req[i + global_nrank]);
}
MPI_Waitall(2 * global_nrank, mpi_req, mpi_status);
for (int i = 0; i < global_nrank; i++) {
delete[] msg[i];
}
delete[] msg;
delete[] local_hist;
delete[] offsets;
delete[] count;
delete[] recv_count;
delete[] mpi_req;
delete[] mpi_status;
printf("Rank %d: After shuffle %d edges\n", global_myrank, new_nnz);
for (int i = 0; i < new_nnz; i++) {
int ival, jval;
int tile = getPartition(final_edge_list[i].src);
assert(tile != -1);
assert(nodeIds[tile] == global_myrank);
}
MPI_Barrier(MPI_COMM_WORLD);
// Sort these edges by segment ID
edge_t<T>* edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)new_nnz * (uint64_t)sizeof(edge_t<T>), 64));
int* partitions = reinterpret_cast<int*>(
_mm_malloc((uint64_t)new_nnz * (uint64_t)sizeof(int), 64));
uint64_t* counts = reinterpret_cast<uint64_t*>(
_mm_malloc((nsegments) * sizeof(uint64_t), 64));
uint64_t* start_nzs = reinterpret_cast<uint64_t*>(
_mm_malloc((nsegments + 1) * sizeof(uint64_t), 64));
memset(counts, 0, (nsegments) * sizeof(uint64_t));
memset(start_nzs, 0, (nsegments+1) * sizeof(uint64_t));
for (uint64_t i = 0; i < (uint64_t)new_nnz; i++) {
partitions[i] = getPartition(final_edge_list[i].src);
counts[partitions[i]]++;
}
uint64_t acc = 0;
for (int i = 0; i < nsegments; i++) {
start_nzs[i] = acc;
acc += counts[i];
}
start_nzs[nsegments] = acc;
memset(counts, 0, (nsegments) * sizeof(uint64_t));
for (uint64_t i = 0; i < (uint64_t)new_nnz; i++) {
int new_idx = start_nzs[partitions[i]] + counts[partitions[i]];
assert(new_idx < new_nnz);
assert(new_idx >= 0);
assert(partitions[i] < nsegments);
assert(partitions[i] >= 0);
edges[new_idx] = final_edge_list[i];
counts[partitions[i]]++;
}
if(new_nnz > 0)
{
_mm_free(final_edge_list);
_mm_free(partitions);
}
for (int segment_i = 0; segment_i < nsegments; segment_i++) {
if (nodeIds[segment_i] == global_myrank) {
int tile_m = start_id[segment_i + 1] - start_id[segment_i];
int nnz = counts[segment_i];
int start_nz = start_nzs[segment_i];
assert(start_nz <= new_nnz);
assert(nnz <= new_nnz);
if(nnz > 0)
{
segments[segment_i]->ingestEdges(edges + start_nz, tile_m, nnz, start_id[segment_i]);
}
}
}
_mm_free(counts);
_mm_free(start_nzs);
_mm_free(edges);
MPI_Barrier(MPI_COMM_WORLD);
}
template<typename T>
void set(int idx, T val) {
int partitionId = getPartition(idx);
assert(partitionId >= 0);
if (nodeIds[partitionId] == global_myrank) {
assert(segments[partitionId]->capacity > 0);
segments[partitionId]->set(idx - start_id[partitionId], val);
}
}
void unset(int idx) {
int partitionId = getPartition(idx);
assert(partitionId >= 0);
if (nodeIds[partitionId] == global_myrank) {
assert(segments[partitionId]->capacity > 0);
segments[partitionId]->unset(idx - start_id[partitionId]);
}
}
template<typename T>
void setAll(T val) {
for(int segmentId = 0 ; segmentId < nsegments ; segmentId++)
{
if(nodeIds[segmentId] == global_myrank)
{
segments[segmentId]->setAll(val);
}
}
}
template<typename T>
void get(const int idx, T * myres) const {
int partitionId = getPartition(idx);
assert(partitionId >= 0);
if (nodeIds[partitionId] == global_myrank) {
SpSegment * segment = segments[partitionId];
*myres = segment->get(idx - start_id[partitionId]);
}
}
int getNNZ()
{
int total_nnz = 0;
for(int s = 0 ; s < nsegments ; s++)
{
if(nodeIds[s] == global_myrank)
{
//total_nnz += segments[s].getNNZ();
total_nnz += segments[s]->compute_nnz();
}
}
// global reduction
MPI_Allreduce(MPI_IN_PLACE, &total_nnz, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
return total_nnz;
}
bool node_owner(const int idx) const {
int partitionId = getPartition(idx);
assert(partitionId >= 0);
bool v;
if (nodeIds[partitionId] == global_myrank) {
v = true;
} else {
v = false;
}
return v;
}
void save(std::string fname, bool includeHeader ) const {
for(int segment = 0 ; segment < nsegments ; segment++)
{
if(nodeIds[segment] == global_myrank)
{
segments[segment]->save(fname + std::to_string(segment), start_id[segment], n, includeHeader);
}
}
}
};
#endif // SRC_SPVEC_H_
|
pr70550-1.c | /* PR middle-end/70550 */
/* { dg-do compile } */
/* { dg-additional-options "-Wuninitialized" } */
#ifdef __SIZEOF_INT128__
typedef __int128 T;
#else
typedef long long T;
#endif
void bar (T);
#pragma omp declare target (bar)
void
foo (void)
{
{
int i;
#pragma omp target defaultmap(tofrom:scalar) /* { dg-bogus "is used uninitialized in this function" } */
{
i = 26;
bar (i);
}
}
{
T j;
#pragma omp target defaultmap(tofrom:scalar) /* { dg-bogus "is used uninitialized in this function" } */
{
j = 37;
bar (j);
}
}
{
int i;
#pragma omp target /* { dg-bogus "is used uninitialized in this function" } */
{
i = 26;
bar (i);
}
}
{
T j;
#pragma omp target /* { dg-bogus "is used uninitialized in this function" } */
{
j = 37;
bar (j);
}
}
{
int i;
#pragma omp target firstprivate (i) /* { dg-warning "is used uninitialized in this function" } */
{
i = 26;
bar (i);
}
}
{
T j;
#pragma omp target firstprivate (j) /* { dg-warning "is used uninitialized in this function" } */
{
j = 37;
bar (j);
}
}
{
int i;
#pragma omp target private (i) /* { dg-bogus "is used uninitialized in this function" } */
{
i = 26;
bar (i);
}
}
{
T j;
#pragma omp target private (j) /* { dg-bogus "is used uninitialized in this function" } */
{
j = 37;
bar (j);
}
}
}
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128);
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_EXCEPT_INVALID SIMDE_MM_EXCEPT_INVALID
#define _MM_EXCEPT_DENORM SIMDE_MM_EXCEPT_DENORM
#define _MM_EXCEPT_DIV_ZERO SIMDE_MM_EXCEPT_DIV_ZERO
#define _MM_EXCEPT_OVERFLOW SIMDE_MM_EXCEPT_OVERFLOW
#define _MM_EXCEPT_UNDERFLOW SIMDE_MM_EXCEPT_UNDERFLOW
#define _MM_EXCEPT_INEXACT SIMDE_MM_EXCEPT_INEXACT
#define _MM_EXCEPT_MASK SIMDE_MM_EXCEPT_MASK
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_MASK_INVALID SIMDE_MM_MASK_INVALID
#define _MM_MASK_DENORM SIMDE_MM_MASK_DENORM
#define _MM_MASK_DIV_ZERO SIMDE_MM_MASK_DIV_ZERO
#define _MM_MASK_OVERFLOW SIMDE_MM_MASK_OVERFLOW
#define _MM_MASK_UNDERFLOW SIMDE_MM_MASK_UNDERFLOW
#define _MM_MASK_INEXACT SIMDE_MM_MASK_INEXACT
#define _MM_MASK_MASK SIMDE_MM_MASK_MASK
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_FLUSH_ZERO_MASK SIMDE_MM_FLUSH_ZERO_MASK
#define _MM_FLUSH_ZERO_ON SIMDE_MM_FLUSH_ZERO_ON
#define _MM_FLUSH_ZERO_OFF SIMDE_MM_FLUSH_ZERO_OFF
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
SIMDE_MM_GET_FLUSH_ZERO_MODE (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
#else
return SIMDE_MM_FLUSH_ZERO_OFF;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_FLUSH_ZERO_MODE (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_FLUSH_ZERO_MODE(a);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_FLUSH_ZERO_MODE(a) SIMDE_MM_SET_FLUSH_ZERO_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_round_ps (simde__m128 a, int rounding, int lax_rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15)
SIMDE_REQUIRE_CONSTANT_RANGE(lax_rounding, 0, 1) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
(void) lax_rounding;
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_rint(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndnq_f32(a_.neon_f32);
#elif defined(simde_math_roundevenf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundevenf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE)
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps((a), (rounding))
#else
#define simde_mm_round_ps(a, rounding) simde_x_mm_round_ps((a), (rounding), 0)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps((a), (rounding))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_i8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512VL_NATIVE)
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_ternarylogic_epi32(ai, ai, ai, 0x55));
#elif defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
simde_float32 mask_;
uint32_t u32_ = UINT32_C(0x7FFFFFFF);
simde_memcpy(&mask_, &u32_, sizeof(u32_));
return _mm_and_ps(_mm_set1_ps(mask_), a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON does not have ordered compare builtin
Need to compare a eq a and b eq b to check for NaN
Do AND of results to get final */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
r_.altivec_f32 = vec_nor(r_.altivec_f32, r_.altivec_f32);
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && SIMDE_NATURAL_VECTOR_SIZE_GE(128)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((a_.f32[0] > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
(a_.f32[0] < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(a_.neon_i16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
simde_float32 v = simde_math_roundf(a_.f32[i]);
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.f32[i];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
r_.i32[i] = ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#endif
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
simde_float32 v = a_.f32[0];
#if !defined(SIMDE_FAST_CONVERSION_RANGE)
return ((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) && (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX))) ?
SIMDE_CONVERT_FTOI(int32_t, v) : INT32_MIN;
#else
return SIMDE_CONVERT_FTOI(int32_t, v);
#endif
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) || (defined(SIMDE_ENABLE_NATIVE_ALIASES) && !defined(SIMDE_ARCH_AMD64))
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if defined(SIMDE_BUG_CLANG_44589)
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if defined(SIMDE_BUG_CLANG_44589)
# define ssimde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#if HEDLEY_HAS_WARNING("-Wold-style-cast")
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), HEDLEY_REINTERPRET_CAST(simde__m64 const*, (mem_addr)))
#else
#define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif (defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(maxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_13_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(HEDLEY_GCC_VERSION)
#define SIMDE_MM_HINT_NTA HEDLEY_STATIC_CAST(enum _mm_hint, 0)
#define SIMDE_MM_HINT_T0 HEDLEY_STATIC_CAST(enum _mm_hint, 1)
#define SIMDE_MM_HINT_T1 HEDLEY_STATIC_CAST(enum _mm_hint, 2)
#define SIMDE_MM_HINT_T2 HEDLEY_STATIC_CAST(enum _mm_hint, 3)
#define SIMDE_MM_HINT_ENTA HEDLEY_STATIC_CAST(enum _mm_hint, 4)
#define SIMDE_MM_HINT_ET0 HEDLEY_STATIC_CAST(enum _mm_hint, 5)
#define SIMDE_MM_HINT_ET1 HEDLEY_STATIC_CAST(enum _mm_hint, 6)
#define SIMDE_MM_HINT_ET2 HEDLEY_STATIC_CAST(enum _mm_hint, 7)
#else
#define SIMDE_MM_HINT_NTA 0
#define SIMDE_MM_HINT_T0 1
#define SIMDE_MM_HINT_T1 2
#define SIMDE_MM_HINT_T2 3
#define SIMDE_MM_HINT_ENTA 4
#define SIMDE_MM_HINT_ET0 5
#define SIMDE_MM_HINT_ET1 6
#define SIMDE_MM_HINT_ET2 7
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wreserved-id-macro")
_Pragma("clang diagnostic ignored \"-Wreserved-id-macro\"")
#endif
#undef _MM_HINT_NTA
#define _MM_HINT_NTA SIMDE_MM_HINT_NTA
#undef _MM_HINT_T0
#define _MM_HINT_T0 SIMDE_MM_HINT_T0
#undef _MM_HINT_T1
#define _MM_HINT_T1 SIMDE_MM_HINT_T1
#undef _MM_HINT_T2
#define _MM_HINT_T2 SIMDE_MM_HINT_T2
#undef _MM_HINT_ETNA
#define _MM_HINT_ETNA SIMDE_MM_HINT_ETNA
#undef _MM_HINT_ET0
#define _MM_HINT_ET0 SIMDE_MM_HINT_ET0
#undef _MM_HINT_ET1
#define _MM_HINT_ET1 SIMDE_MM_HINT_ET1
#undef _MM_HINT_ET1
#define _MM_HINT_ET2 SIMDE_MM_HINT_ET2
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
#if defined(__clang__) && !SIMDE_DETECT_CLANG_VERSION_CHECK(10,0,0) /* https://reviews.llvm.org/D71718 */
#define simde_mm_prefetch(p, i) \
(__extension__({ \
HEDLEY_DIAGNOSTIC_PUSH \
HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
_mm_prefetch((p), (i)); \
HEDLEY_DIAGNOSTIC_POP \
}))
#else
#define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shuffle_ps(a, b, imm8) \
__extension__({ \
float32x4_t ret; \
ret = vmovq_n_f32( \
vgetq_lane_f32(a, (imm8) & (0x3))); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(a, ((imm8) >> 2) & 0x3), \
ret, 1); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 4) & 0x3), \
ret, 2); \
ret = vsetq_lane_f32( \
vgetq_lane_f32(b, ((imm8) >> 6) & 0x3), \
ret, 3); \
})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) || defined(SIMDE_ZARCH_ZVECTOR_14_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
#elif defined(simde_math_sqrt)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_i32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
simde_mm_store_ps(mem_addr_, tmp_.f32);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
Sigmoid.c | #include "../thnets.h"
#include <math.h>
#ifdef ONNX
void onnxload_Sigmoid(const void *graph, struct module *m, int nodeidx)
{
m->updateOutput = nn_Sigmoid_updateOutput;
m->type = MT_Sigmoid;
}
#endif
THFloatTensor *nn_Sigmoid_updateOutput(struct module *module, THFloatTensor *input)
{
THFloatTensor *output = module->output;
float *input_data, *output_data;
long i, n = THFloatTensor_nElement(input);
THFloatTensor_resizeAs(output, input);
input_data = THFloatTensor_data(input);
output_data = THFloatTensor_data(output);
#pragma omp parallel for
for(i = 0; i < n; i++)
output_data[i] = 1.0f / (1.0f + expf(-input_data[i]));
return output;
}
|
dbatch.c | #include "kog.h"
#include "mem.h"
#ifdef USE_MSR
#include "msr.h"
#endif /* USE_MSR */
#include "wre.h"
int main(int argc, char *argv[])
{
if (4 != argc) {
(void)fprintf(stderr, "%s n #batches infile\n", argv[0]);
return EXIT_FAILURE;
}
const size_t n = atoz(argv[1]);
if (!n) {
perror("atoz(n)");
return EXIT_FAILURE;
}
// TODO: FIXME for n not a power of two
if (_mm_popcnt_u64(n) > (__int64)1) {
perror("n not a power of two");
return EXIT_FAILURE;
}
const size_t b = atoz(argv[2]);
if (!b) {
perror("atoz(b)");
return EXIT_FAILURE;
}
FILE *const f = fopen(argv[3], "rb");
if (!f) {
perror("fopen");
return EXIT_FAILURE;
}
Dmem *const d = Dalloc(n);
if (!d)
return EXIT_FAILURE;
Tout *const t = Talloc(n);
if (!t)
return EXIT_FAILURE;
(void)fprintf(stdout, "\"DBATCH\",\"WTIMEs\",\"K2\",\"RE\",\"OU\",\"OV\"");
#ifdef USE_MSR
(void)fprintf(stdout, ",\"A_M\"");
#endif /* USE_MSR */
(void)fprintf(stdout, "\n");
(void)fflush(stdout);
const size_t V = n2V(n);
#ifdef USE_MSR
const size_t mt = (size_t)omp_get_max_threads();
FILE *const mf = fopen("dmsr.csv", "w");
if (!mf) {
perror("fopen(dmsr.csv)");
return EXIT_FAILURE;
}
(void)fprintf(mf, "\"DBATCH\",\"TIX\",\"MPERF\",\"APERF\"\n");
(void)fflush(mf);
#endif /* USE_MSR */
for (size_t j = (size_t)1u; j <= b; ++j) {
if (n != fread(d->r.A11, sizeof(double), n, f)) {
perror("fread(A11r)");
return EXIT_FAILURE;
}
if (n != fread(d->r.A21, sizeof(double), n, f)) {
perror("fread(A21r)");
return EXIT_FAILURE;
}
if (n != fread(d->r.A12, sizeof(double), n, f)) {
perror("fread(A12r)");
return EXIT_FAILURE;
}
if (n != fread(d->r.A22, sizeof(double), n, f)) {
perror("fread(A22r)");
return EXIT_FAILURE;
}
double
#ifdef USE_MSR
avg = 0.0,
#endif /* USE_MSR */
w = omp_get_wtime();
#ifdef _OPENMP
#ifdef USE_MSR
#pragma omp parallel default(none) shared(j,V,d,mf) reduction(+:avg)
#else /* !USE_MSR */
#pragma omp parallel default(none) shared(V,d)
#endif /* ?USE_MSR */
{
#endif /* _OPENMP */
#ifdef USE_MSR
const int tix = omp_get_thread_num();
const int cfd = msr_open(msr_mycpu());
uint64_t aperf = UINT64_C(0), mperf = UINT64_C(0);
if (cfd >= 0) {
(void)msr_read(cfd, IA32_MPERF, &mperf);
(void)msr_read(cfd, IA32_APERF, &aperf);
}
#endif /* USE_MSR */
#ifdef _OPENMP
#pragma omp for
#endif /* _OPENMP */
for (size_t i = (size_t)0u; i < V; ++i) {
const size_t k = (i << VLlg);
d8svd2_
((d->r.A11 + k), (d->r.A21 + k), (d->r.A12 + k), (d->r.A22 + k),
(d->r.U11 + k), (d->r.U21 + k), (d->r.U12 + k), (d->r.U22 + k),
(d->r.V11 + k), (d->r.V21 + k), (d->r.V12 + k), (d->r.V22 + k),
(d->v.S1 + k), (d->v.S2 + k), (d->v.s + k));
}
#ifdef USE_MSR
if (cfd >= 0) {
uint64_t mval = UINT64_C(0), aval = UINT64_C(0);
(void)msr_read(cfd, IA32_MPERF, &mval);
(void)msr_read(cfd, IA32_APERF, &aval);
(void)msr_close(cfd);
mperf = ((mval > mperf) ? (mval - mperf) : UINT64_C(0));
aperf = ((aval > aperf) ? (aval - aperf) : UINT64_C(0));
avg = ((mperf && aperf && (mperf > aperf)) ? (((double)aperf) / mperf) : 1.0);
}
else
avg = 1.0;
#pragma omp critical
{
(void)fprintf(mf, "%zu,%d,%lu,%lu\n", j, tix, mperf, aperf);
(void)fflush(mf);
}
#endif /* USE_MSR */
#ifdef _OPENMP
}
#endif /* _OPENMP */
w = omp_get_wtime() - w;
#ifdef USE_MSR
avg /= mt;
#endif /* USE_MSR */
wdre
(n, t->K2, t->RE, t->OU, t->OV,
d->r.A11, d->r.A21, d->r.A12, d->r.A22,
d->r.U11, d->r.U21, d->r.U12, d->r.U22,
d->r.V11, d->r.V21, d->r.V12, d->r.V22,
d->v.S1, d->v.S2, d->v.s);
// TODO: FIXME for n not a power of two
for (size_t k = n >> 1u; k; k >>= 1u) {
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(k,t)
#endif /* _OPENMP */
for (size_t i = (size_t)0u; i < k; ++i) {
(t->K2)[i] = fmaxw((t->K2)[i], (t->K2)[i + k]);
(t->RE)[i] = fmaxw((t->RE)[i], (t->RE)[i + k]);
(t->OU)[i] = fmaxw((t->OU)[i], (t->OU)[i + k]);
(t->OV)[i] = fmaxw((t->OV)[i], (t->OV)[i + k]);
}
}
(void)Bwre(stdout, j, w, *(t->K2), *(t->RE), *(t->OU), *(t->OV),
#ifdef USE_MSR
&avg
#else /* !USE_MSR */
(const double*)NULL
#endif /* ?USE_MSR */
);
}
#ifdef USE_MSR
(void)fclose(mf);
#endif /* USE_MSR */
(void)Tfree(t);
(void)Dfree(d);
return (fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS);
}
|
tree-parloops.c | /* Loop autoparallelization.
Copyright (C) 2006-2015 Free Software Foundation, Inc.
Contributed by Sebastian Pop <pop@cri.ensmp.fr>
Zdenek Dvorak <dvorakz@suse.cz> and Razya Ladelsky <razya@il.ibm.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "predict.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "stor-layout.h"
#include "tree-nested.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-data-ref.h"
#include "tree-scalar-evolution.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
#include "tree-hasher.h"
#include "tree-parloops.h"
#include "omp-low.h"
#include "tree-nested.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
/* This pass tries to distribute iterations of loops into several threads.
The implementation is straightforward -- for each loop we test whether its
iterations are independent, and if it is the case (and some additional
conditions regarding profitability and correctness are satisfied), we
add GIMPLE_OMP_PARALLEL and GIMPLE_OMP_FOR codes and let omp expansion
machinery do its job.
The most of the complexity is in bringing the code into shape expected
by the omp expanders:
-- for GIMPLE_OMP_FOR, ensuring that the loop has only one induction
variable and that the exit test is at the start of the loop body
-- for GIMPLE_OMP_PARALLEL, replacing the references to local addressable
variables by accesses through pointers, and breaking up ssa chains
by storing the values incoming to the parallelized loop to a structure
passed to the new function as an argument (something similar is done
in omp gimplification, unfortunately only a small part of the code
can be shared).
TODO:
-- if there are several parallelizable loops in a function, it may be
possible to generate the threads just once (using synchronization to
ensure that cross-loop dependences are obeyed).
-- handling of common reduction patterns for outer loops.
More info can also be found at http://gcc.gnu.org/wiki/AutoParInGCC */
/*
Reduction handling:
currently we use vect_force_simple_reduction() to detect reduction patterns.
The code transformation will be introduced by an example.
parloop
{
int sum=1;
for (i = 0; i < N; i++)
{
x[i] = i + 3;
sum+=x[i];
}
}
gimple-like code:
header_bb:
# sum_29 = PHI <sum_11(5), 1(3)>
# i_28 = PHI <i_12(5), 0(3)>
D.1795_8 = i_28 + 3;
x[i_28] = D.1795_8;
sum_11 = D.1795_8 + sum_29;
i_12 = i_28 + 1;
if (N_6(D) > i_12)
goto header_bb;
exit_bb:
# sum_21 = PHI <sum_11(4)>
printf (&"%d"[0], sum_21);
after reduction transformation (only relevant parts):
parloop
{
....
# Storing the initial value given by the user. #
.paral_data_store.32.sum.27 = 1;
#pragma omp parallel num_threads(4)
#pragma omp for schedule(static)
# The neutral element corresponding to the particular
reduction's operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc. replaces the user's initial value. #
# sum.27_29 = PHI <sum.27_11, 0>
sum.27_11 = D.1827_8 + sum.27_29;
GIMPLE_OMP_CONTINUE
# Adding this reduction phi is done at create_phi_for_local_result() #
# sum.27_56 = PHI <sum.27_11, 0>
GIMPLE_OMP_RETURN
# Creating the atomic operation is done at
create_call_for_reduction_1() #
#pragma omp atomic_load
D.1839_59 = *&.paral_data_load.33_51->reduction.23;
D.1840_60 = sum.27_56 + D.1839_59;
#pragma omp atomic_store (D.1840_60);
GIMPLE_OMP_RETURN
# collecting the result after the join of the threads is done at
create_loads_for_reductions().
The value computed by the threads is loaded from the
shared struct. #
.paral_data_load.33_52 = &.paral_data_store.32;
sum_37 = .paral_data_load.33_52->sum.27;
sum_43 = D.1795_41 + sum_37;
exit bb:
# sum_21 = PHI <sum_43, sum_26>
printf (&"%d"[0], sum_21);
...
}
*/
/* Minimal number of iterations of a loop that should be executed in each
thread. */
#define MIN_PER_THREAD 100
/* Element of the hashtable, representing a
reduction in the current loop. */
struct reduction_info
{
gimple reduc_stmt; /* reduction statement. */
gimple reduc_phi; /* The phi node defining the reduction. */
enum tree_code reduction_code;/* code for the reduction operation. */
unsigned reduc_version; /* SSA_NAME_VERSION of original reduc_phi
result. */
gphi *keep_res; /* The PHI_RESULT of this phi is the resulting value
of the reduction variable when existing the loop. */
tree initial_value; /* The initial value of the reduction var before entering the loop. */
tree field; /* the name of the field in the parloop data structure intended for reduction. */
tree init; /* reduction initialization value. */
gphi *new_phi; /* (helper field) Newly created phi node whose result
will be passed to the atomic operation. Represents
the local result each thread computed for the reduction
operation. */
};
/* Reduction info hashtable helpers. */
struct reduction_hasher : typed_free_remove <reduction_info>
{
typedef reduction_info value_type;
typedef reduction_info compare_type;
static inline hashval_t hash (const value_type *);
static inline bool equal (const value_type *, const compare_type *);
};
/* Equality and hash functions for hashtab code. */
inline bool
reduction_hasher::equal (const value_type *a, const compare_type *b)
{
return (a->reduc_phi == b->reduc_phi);
}
inline hashval_t
reduction_hasher::hash (const value_type *a)
{
return a->reduc_version;
}
typedef hash_table<reduction_hasher> reduction_info_table_type;
static struct reduction_info *
reduction_phi (reduction_info_table_type *reduction_list, gimple phi)
{
struct reduction_info tmpred, *red;
if (reduction_list->elements () == 0 || phi == NULL)
return NULL;
tmpred.reduc_phi = phi;
tmpred.reduc_version = gimple_uid (phi);
red = reduction_list->find (&tmpred);
return red;
}
/* Element of hashtable of names to copy. */
struct name_to_copy_elt
{
unsigned version; /* The version of the name to copy. */
tree new_name; /* The new name used in the copy. */
tree field; /* The field of the structure used to pass the
value. */
};
/* Name copies hashtable helpers. */
struct name_to_copy_hasher : typed_free_remove <name_to_copy_elt>
{
typedef name_to_copy_elt value_type;
typedef name_to_copy_elt compare_type;
static inline hashval_t hash (const value_type *);
static inline bool equal (const value_type *, const compare_type *);
};
/* Equality and hash functions for hashtab code. */
inline bool
name_to_copy_hasher::equal (const value_type *a, const compare_type *b)
{
return a->version == b->version;
}
inline hashval_t
name_to_copy_hasher::hash (const value_type *a)
{
return (hashval_t) a->version;
}
typedef hash_table<name_to_copy_hasher> name_to_copy_table_type;
/* A transformation matrix, which is a self-contained ROWSIZE x COLSIZE
matrix. Rather than use floats, we simply keep a single DENOMINATOR that
represents the denominator for every element in the matrix. */
typedef struct lambda_trans_matrix_s
{
lambda_matrix matrix;
int rowsize;
int colsize;
int denominator;
} *lambda_trans_matrix;
#define LTM_MATRIX(T) ((T)->matrix)
#define LTM_ROWSIZE(T) ((T)->rowsize)
#define LTM_COLSIZE(T) ((T)->colsize)
#define LTM_DENOMINATOR(T) ((T)->denominator)
/* Allocate a new transformation matrix. */
static lambda_trans_matrix
lambda_trans_matrix_new (int colsize, int rowsize,
struct obstack * lambda_obstack)
{
lambda_trans_matrix ret;
ret = (lambda_trans_matrix)
obstack_alloc (lambda_obstack, sizeof (struct lambda_trans_matrix_s));
LTM_MATRIX (ret) = lambda_matrix_new (rowsize, colsize, lambda_obstack);
LTM_ROWSIZE (ret) = rowsize;
LTM_COLSIZE (ret) = colsize;
LTM_DENOMINATOR (ret) = 1;
return ret;
}
/* Multiply a vector VEC by a matrix MAT.
MAT is an M*N matrix, and VEC is a vector with length N. The result
is stored in DEST which must be a vector of length M. */
static void
lambda_matrix_vector_mult (lambda_matrix matrix, int m, int n,
lambda_vector vec, lambda_vector dest)
{
int i, j;
lambda_vector_clear (dest, m);
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
dest[i] += matrix[i][j] * vec[j];
}
/* Return true if TRANS is a legal transformation matrix that respects
the dependence vectors in DISTS and DIRS. The conservative answer
is false.
"Wolfe proves that a unimodular transformation represented by the
matrix T is legal when applied to a loop nest with a set of
lexicographically non-negative distance vectors RDG if and only if
for each vector d in RDG, (T.d >= 0) is lexicographically positive.
i.e.: if and only if it transforms the lexicographically positive
distance vectors to lexicographically positive vectors. Note that
a unimodular matrix must transform the zero vector (and only it) to
the zero vector." S.Muchnick. */
static bool
lambda_transform_legal_p (lambda_trans_matrix trans,
int nb_loops,
vec<ddr_p> dependence_relations)
{
unsigned int i, j;
lambda_vector distres;
struct data_dependence_relation *ddr;
gcc_assert (LTM_COLSIZE (trans) == nb_loops
&& LTM_ROWSIZE (trans) == nb_loops);
/* When there are no dependences, the transformation is correct. */
if (dependence_relations.length () == 0)
return true;
ddr = dependence_relations[0];
if (ddr == NULL)
return true;
/* When there is an unknown relation in the dependence_relations, we
know that it is no worth looking at this loop nest: give up. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return false;
distres = lambda_vector_new (nb_loops);
/* For each distance vector in the dependence graph. */
FOR_EACH_VEC_ELT (dependence_relations, i, ddr)
{
/* Don't care about relations for which we know that there is no
dependence, nor about read-read (aka. output-dependences):
these data accesses can happen in any order. */
if (DDR_ARE_DEPENDENT (ddr) == chrec_known
|| (DR_IS_READ (DDR_A (ddr)) && DR_IS_READ (DDR_B (ddr))))
continue;
/* Conservatively answer: "this transformation is not valid". */
if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
return false;
/* If the dependence could not be captured by a distance vector,
conservatively answer that the transform is not valid. */
if (DDR_NUM_DIST_VECTS (ddr) == 0)
return false;
/* Compute trans.dist_vect */
for (j = 0; j < DDR_NUM_DIST_VECTS (ddr); j++)
{
lambda_matrix_vector_mult (LTM_MATRIX (trans), nb_loops, nb_loops,
DDR_DIST_VECT (ddr, j), distres);
if (!lambda_vector_lexico_pos (distres, nb_loops))
return false;
}
}
return true;
}
/* Data dependency analysis. Returns true if the iterations of LOOP
are independent on each other (that is, if we can execute them
in parallel). */
static bool
loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
{
vec<ddr_p> dependence_relations;
vec<data_reference_p> datarefs;
lambda_trans_matrix trans;
bool ret = false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Considering loop %d\n", loop->num);
if (!loop->inner)
fprintf (dump_file, "loop is innermost\n");
else
fprintf (dump_file, "loop NOT innermost\n");
}
/* Check for problems with dependences. If the loop can be reversed,
the iterations are independent. */
auto_vec<loop_p, 3> loop_nest;
datarefs.create (10);
dependence_relations.create (100);
if (! compute_data_dependences_for_loop (loop, true, &loop_nest, &datarefs,
&dependence_relations))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " FAILED: cannot analyze data dependencies\n");
ret = false;
goto end;
}
if (dump_file && (dump_flags & TDF_DETAILS))
dump_data_dependence_relations (dump_file, dependence_relations);
trans = lambda_trans_matrix_new (1, 1, parloop_obstack);
LTM_MATRIX (trans)[0][0] = -1;
if (lambda_transform_legal_p (trans, 1, dependence_relations))
{
ret = true;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " SUCCESS: may be parallelized\n");
}
else if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" FAILED: data dependencies exist across iterations\n");
end:
free_dependence_relations (dependence_relations);
free_data_refs (datarefs);
return ret;
}
/* Return true when LOOP contains basic blocks marked with the
BB_IRREDUCIBLE_LOOP flag. */
static inline bool
loop_has_blocks_with_irreducible_flag (struct loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
bool res = true;
for (i = 0; i < loop->num_nodes; i++)
if (bbs[i]->flags & BB_IRREDUCIBLE_LOOP)
goto end;
res = false;
end:
free (bbs);
return res;
}
/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
The assignment statement is placed on edge ENTRY. DECL_ADDRESS maps decls
to their addresses that can be reused. The address of OBJ is known to
be invariant in the whole function. Other needed statements are placed
right before GSI. */
static tree
take_address_of (tree obj, tree type, edge entry,
int_tree_htab_type *decl_address, gimple_stmt_iterator *gsi)
{
int uid;
tree *var_p, name, addr;
gassign *stmt;
gimple_seq stmts;
/* Since the address of OBJ is invariant, the trees may be shared.
Avoid rewriting unrelated parts of the code. */
obj = unshare_expr (obj);
for (var_p = &obj;
handled_component_p (*var_p);
var_p = &TREE_OPERAND (*var_p, 0))
continue;
/* Canonicalize the access to base on a MEM_REF. */
if (DECL_P (*var_p))
*var_p = build_simple_mem_ref (build_fold_addr_expr (*var_p));
/* Assign a canonical SSA name to the address of the base decl used
in the address and share it for all accesses and addresses based
on it. */
uid = DECL_UID (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0));
int_tree_map elt;
elt.uid = uid;
int_tree_map *slot = decl_address->find_slot (elt, INSERT);
if (!slot->to)
{
if (gsi == NULL)
return NULL;
addr = TREE_OPERAND (*var_p, 0);
const char *obj_name
= get_name (TREE_OPERAND (TREE_OPERAND (*var_p, 0), 0));
if (obj_name)
name = make_temp_ssa_name (TREE_TYPE (addr), NULL, obj_name);
else
name = make_ssa_name (TREE_TYPE (addr));
stmt = gimple_build_assign (name, addr);
gsi_insert_on_edge_immediate (entry, stmt);
slot->uid = uid;
slot->to = name;
}
else
name = slot->to;
/* Express the address in terms of the canonical SSA name. */
TREE_OPERAND (*var_p, 0) = name;
if (gsi == NULL)
return build_fold_addr_expr_with_type (obj, type);
name = force_gimple_operand (build_addr (obj, current_function_decl),
&stmts, true, NULL_TREE);
if (!gimple_seq_empty_p (stmts))
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
if (!useless_type_conversion_p (type, TREE_TYPE (name)))
{
name = force_gimple_operand (fold_convert (type, name), &stmts, true,
NULL_TREE);
if (!gimple_seq_empty_p (stmts))
gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
}
return name;
}
/* Callback for htab_traverse. Create the initialization statement
for reduction described in SLOT, and place it at the preheader of
the loop described in DATA. */
int
initialize_reductions (reduction_info **slot, struct loop *loop)
{
tree init, c;
tree bvar, type, arg;
edge e;
struct reduction_info *const reduc = *slot;
/* Create initialization in preheader:
reduction_variable = initialization value of reduction. */
/* In the phi node at the header, replace the argument coming
from the preheader with the reduction initialization value. */
/* Create a new variable to initialize the reduction. */
type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
bvar = create_tmp_var (type, "reduction");
c = build_omp_clause (gimple_location (reduc->reduc_stmt),
OMP_CLAUSE_REDUCTION);
OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
OMP_CLAUSE_DECL (c) = SSA_NAME_VAR (gimple_assign_lhs (reduc->reduc_stmt));
init = omp_reduction_init (c, TREE_TYPE (bvar));
reduc->init = init;
/* Replace the argument representing the initialization value
with the initialization value for the reduction (neutral
element for the particular operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc).
Keep the old value in a new variable "reduction_initial",
that will be taken in consideration after the parallel
computing is done. */
e = loop_preheader_edge (loop);
arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
/* Create new variable to hold the initial value. */
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
(reduc->reduc_phi, loop_preheader_edge (loop)), init);
reduc->initial_value = arg;
return 1;
}
struct elv_data
{
struct walk_stmt_info info;
edge entry;
int_tree_htab_type *decl_address;
gimple_stmt_iterator *gsi;
bool changed;
bool reset;
};
/* Eliminates references to local variables in *TP out of the single
entry single exit region starting at DTA->ENTRY.
DECL_ADDRESS contains addresses of the references that had their
address taken already. If the expression is changed, CHANGED is
set to true. Callback for walk_tree. */
static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
struct elv_data *const dta = (struct elv_data *) data;
tree t = *tp, var, addr, addr_type, type, obj;
if (DECL_P (t))
{
*walk_subtrees = 0;
if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
return NULL_TREE;
type = TREE_TYPE (t);
addr_type = build_pointer_type (type);
addr = take_address_of (t, addr_type, dta->entry, dta->decl_address,
dta->gsi);
if (dta->gsi == NULL && addr == NULL_TREE)
{
dta->reset = true;
return NULL_TREE;
}
*tp = build_simple_mem_ref (addr);
dta->changed = true;
return NULL_TREE;
}
if (TREE_CODE (t) == ADDR_EXPR)
{
/* ADDR_EXPR may appear in two contexts:
-- as a gimple operand, when the address taken is a function invariant
-- as gimple rhs, when the resulting address in not a function
invariant
We do not need to do anything special in the latter case (the base of
the memory reference whose address is taken may be replaced in the
DECL_P case). The former case is more complicated, as we need to
ensure that the new address is still a gimple operand. Thus, it
is not sufficient to replace just the base of the memory reference --
we need to move the whole computation of the address out of the
loop. */
if (!is_gimple_val (t))
return NULL_TREE;
*walk_subtrees = 0;
obj = TREE_OPERAND (t, 0);
var = get_base_address (obj);
if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
return NULL_TREE;
addr_type = TREE_TYPE (t);
addr = take_address_of (obj, addr_type, dta->entry, dta->decl_address,
dta->gsi);
if (dta->gsi == NULL && addr == NULL_TREE)
{
dta->reset = true;
return NULL_TREE;
}
*tp = addr;
dta->changed = true;
return NULL_TREE;
}
if (!EXPR_P (t))
*walk_subtrees = 0;
return NULL_TREE;
}
/* Moves the references to local variables in STMT at *GSI out of the single
entry single exit region starting at ENTRY. DECL_ADDRESS contains
addresses of the references that had their address taken
already. */
static void
eliminate_local_variables_stmt (edge entry, gimple_stmt_iterator *gsi,
int_tree_htab_type *decl_address)
{
struct elv_data dta;
gimple stmt = gsi_stmt (*gsi);
memset (&dta.info, '\0', sizeof (dta.info));
dta.entry = entry;
dta.decl_address = decl_address;
dta.changed = false;
dta.reset = false;
if (gimple_debug_bind_p (stmt))
{
dta.gsi = NULL;
walk_tree (gimple_debug_bind_get_value_ptr (stmt),
eliminate_local_variables_1, &dta.info, NULL);
if (dta.reset)
{
gimple_debug_bind_reset_value (stmt);
dta.changed = true;
}
}
else if (gimple_clobber_p (stmt))
{
unlink_stmt_vdef (stmt);
stmt = gimple_build_nop ();
gsi_replace (gsi, stmt, false);
dta.changed = true;
}
else
{
dta.gsi = gsi;
walk_gimple_op (stmt, eliminate_local_variables_1, &dta.info);
}
if (dta.changed)
update_stmt (stmt);
}
/* Eliminates the references to local variables from the single entry
single exit region between the ENTRY and EXIT edges.
This includes:
1) Taking address of a local variable -- these are moved out of the
region (and temporary variable is created to hold the address if
necessary).
2) Dereferencing a local variable -- these are replaced with indirect
references. */
static void
eliminate_local_variables (edge entry, edge exit)
{
basic_block bb;
auto_vec<basic_block, 3> body;
unsigned i;
gimple_stmt_iterator gsi;
bool has_debug_stmt = false;
int_tree_htab_type decl_address (10);
basic_block entry_bb = entry->src;
basic_block exit_bb = exit->dest;
gather_blocks_in_sese_region (entry_bb, exit_bb, &body);
FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (is_gimple_debug (gsi_stmt (gsi)))
{
if (gimple_debug_bind_p (gsi_stmt (gsi)))
has_debug_stmt = true;
}
else
eliminate_local_variables_stmt (entry, &gsi, &decl_address);
if (has_debug_stmt)
FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (gimple_debug_bind_p (gsi_stmt (gsi)))
eliminate_local_variables_stmt (entry, &gsi, &decl_address);
}
/* Returns true if expression EXPR is not defined between ENTRY and
EXIT, i.e. if all its operands are defined outside of the region. */
static bool
expr_invariant_in_region_p (edge entry, edge exit, tree expr)
{
basic_block entry_bb = entry->src;
basic_block exit_bb = exit->dest;
basic_block def_bb;
if (is_gimple_min_invariant (expr))
return true;
if (TREE_CODE (expr) == SSA_NAME)
{
def_bb = gimple_bb (SSA_NAME_DEF_STMT (expr));
if (def_bb
&& dominated_by_p (CDI_DOMINATORS, def_bb, entry_bb)
&& !dominated_by_p (CDI_DOMINATORS, def_bb, exit_bb))
return false;
return true;
}
return false;
}
/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
The copies are stored to NAME_COPIES, if NAME was already duplicated,
its duplicate stored in NAME_COPIES is returned.
Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
duplicated, storing the copies in DECL_COPIES. */
static tree
separate_decls_in_region_name (tree name, name_to_copy_table_type *name_copies,
int_tree_htab_type *decl_copies,
bool copy_name_p)
{
tree copy, var, var_copy;
unsigned idx, uid, nuid;
struct int_tree_map ielt;
struct name_to_copy_elt elt, *nelt;
name_to_copy_elt **slot;
int_tree_map *dslot;
if (TREE_CODE (name) != SSA_NAME)
return name;
idx = SSA_NAME_VERSION (name);
elt.version = idx;
slot = name_copies->find_slot_with_hash (&elt, idx,
copy_name_p ? INSERT : NO_INSERT);
if (slot && *slot)
return (*slot)->new_name;
if (copy_name_p)
{
copy = duplicate_ssa_name (name, NULL);
nelt = XNEW (struct name_to_copy_elt);
nelt->version = idx;
nelt->new_name = copy;
nelt->field = NULL_TREE;
*slot = nelt;
}
else
{
gcc_assert (!slot);
copy = name;
}
var = SSA_NAME_VAR (name);
if (!var)
return copy;
uid = DECL_UID (var);
ielt.uid = uid;
dslot = decl_copies->find_slot_with_hash (ielt, uid, INSERT);
if (!dslot->to)
{
var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
dslot->uid = uid;
dslot->to = var_copy;
/* Ensure that when we meet this decl next time, we won't duplicate
it again. */
nuid = DECL_UID (var_copy);
ielt.uid = nuid;
dslot = decl_copies->find_slot_with_hash (ielt, nuid, INSERT);
gcc_assert (!dslot->to);
dslot->uid = nuid;
dslot->to = var_copy;
}
else
var_copy = dslot->to;
replace_ssa_name_symbol (copy, var_copy);
return copy;
}
/* Finds the ssa names used in STMT that are defined outside the
region between ENTRY and EXIT and replaces such ssa names with
their duplicates. The duplicates are stored to NAME_COPIES. Base
decls of all ssa names used in STMT (including those defined in
LOOP) are replaced with the new temporary variables; the
replacement decls are stored in DECL_COPIES. */
static void
separate_decls_in_region_stmt (edge entry, edge exit, gimple stmt,
name_to_copy_table_type *name_copies,
int_tree_htab_type *decl_copies)
{
use_operand_p use;
def_operand_p def;
ssa_op_iter oi;
tree name, copy;
bool copy_name_p;
FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
{
name = DEF_FROM_PTR (def);
gcc_assert (TREE_CODE (name) == SSA_NAME);
copy = separate_decls_in_region_name (name, name_copies, decl_copies,
false);
gcc_assert (copy == name);
}
FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
{
name = USE_FROM_PTR (use);
if (TREE_CODE (name) != SSA_NAME)
continue;
copy_name_p = expr_invariant_in_region_p (entry, exit, name);
copy = separate_decls_in_region_name (name, name_copies, decl_copies,
copy_name_p);
SET_USE (use, copy);
}
}
/* Finds the ssa names used in STMT that are defined outside the
region between ENTRY and EXIT and replaces such ssa names with
their duplicates. The duplicates are stored to NAME_COPIES. Base
decls of all ssa names used in STMT (including those defined in
LOOP) are replaced with the new temporary variables; the
replacement decls are stored in DECL_COPIES. */
static bool
separate_decls_in_region_debug (gimple stmt,
name_to_copy_table_type *name_copies,
int_tree_htab_type *decl_copies)
{
use_operand_p use;
ssa_op_iter oi;
tree var, name;
struct int_tree_map ielt;
struct name_to_copy_elt elt;
name_to_copy_elt **slot;
int_tree_map *dslot;
if (gimple_debug_bind_p (stmt))
var = gimple_debug_bind_get_var (stmt);
else if (gimple_debug_source_bind_p (stmt))
var = gimple_debug_source_bind_get_var (stmt);
else
return true;
if (TREE_CODE (var) == DEBUG_EXPR_DECL || TREE_CODE (var) == LABEL_DECL)
return true;
gcc_assert (DECL_P (var) && SSA_VAR_P (var));
ielt.uid = DECL_UID (var);
dslot = decl_copies->find_slot_with_hash (ielt, ielt.uid, NO_INSERT);
if (!dslot)
return true;
if (gimple_debug_bind_p (stmt))
gimple_debug_bind_set_var (stmt, dslot->to);
else if (gimple_debug_source_bind_p (stmt))
gimple_debug_source_bind_set_var (stmt, dslot->to);
FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
{
name = USE_FROM_PTR (use);
if (TREE_CODE (name) != SSA_NAME)
continue;
elt.version = SSA_NAME_VERSION (name);
slot = name_copies->find_slot_with_hash (&elt, elt.version, NO_INSERT);
if (!slot)
{
gimple_debug_bind_reset_value (stmt);
update_stmt (stmt);
break;
}
SET_USE (use, (*slot)->new_name);
}
return false;
}
/* Callback for htab_traverse. Adds a field corresponding to the reduction
specified in SLOT. The type is passed in DATA. */
int
add_field_for_reduction (reduction_info **slot, tree type)
{
struct reduction_info *const red = *slot;
tree var = gimple_assign_lhs (red->reduc_stmt);
tree field = build_decl (gimple_location (red->reduc_stmt), FIELD_DECL,
SSA_NAME_IDENTIFIER (var), TREE_TYPE (var));
insert_field_into_struct (type, field);
red->field = field;
return 1;
}
/* Callback for htab_traverse. Adds a field corresponding to a ssa name
described in SLOT. The type is passed in DATA. */
int
add_field_for_name (name_to_copy_elt **slot, tree type)
{
struct name_to_copy_elt *const elt = *slot;
tree name = ssa_name (elt->version);
tree field = build_decl (UNKNOWN_LOCATION,
FIELD_DECL, SSA_NAME_IDENTIFIER (name),
TREE_TYPE (name));
insert_field_into_struct (type, field);
elt->field = field;
return 1;
}
/* Callback for htab_traverse. A local result is the intermediate result
computed by a single
thread, or the initial value in case no iteration was executed.
This function creates a phi node reflecting these values.
The phi's result will be stored in NEW_PHI field of the
reduction's data structure. */
int
create_phi_for_local_result (reduction_info **slot, struct loop *loop)
{
struct reduction_info *const reduc = *slot;
edge e;
gphi *new_phi;
basic_block store_bb;
tree local_res;
source_location locus;
/* STORE_BB is the block where the phi
should be stored. It is the destination of the loop exit.
(Find the fallthru edge from GIMPLE_OMP_CONTINUE). */
store_bb = FALLTHRU_EDGE (loop->latch)->dest;
/* STORE_BB has two predecessors. One coming from the loop
(the reduction's result is computed at the loop),
and another coming from a block preceding the loop,
when no iterations
are executed (the initial value should be taken). */
if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
e = EDGE_PRED (store_bb, 1);
else
e = EDGE_PRED (store_bb, 0);
local_res = copy_ssa_name (gimple_assign_lhs (reduc->reduc_stmt));
locus = gimple_location (reduc->reduc_stmt);
new_phi = create_phi_node (local_res, store_bb);
add_phi_arg (new_phi, reduc->init, e, locus);
add_phi_arg (new_phi, gimple_assign_lhs (reduc->reduc_stmt),
FALLTHRU_EDGE (loop->latch), locus);
reduc->new_phi = new_phi;
return 1;
}
struct clsn_data
{
tree store;
tree load;
basic_block store_bb;
basic_block load_bb;
};
/* Callback for htab_traverse. Create an atomic instruction for the
reduction described in SLOT.
DATA annotates the place in memory the atomic operation relates to,
and the basic block it needs to be generated in. */
int
create_call_for_reduction_1 (reduction_info **slot, struct clsn_data *clsn_data)
{
struct reduction_info *const reduc = *slot;
gimple_stmt_iterator gsi;
tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
tree load_struct;
basic_block bb;
basic_block new_bb;
edge e;
tree t, addr, ref, x;
tree tmp_load, name;
gimple load;
load_struct = build_simple_mem_ref (clsn_data->load);
t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);
addr = build_addr (t, current_function_decl);
/* Create phi node. */
bb = clsn_data->load_bb;
gsi = gsi_last_bb (bb);
e = split_block (bb, gsi_stmt (gsi));
new_bb = e->dest;
tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)));
tmp_load = make_ssa_name (tmp_load);
load = gimple_build_omp_atomic_load (tmp_load, addr);
SSA_NAME_DEF_STMT (tmp_load) = load;
gsi = gsi_start_bb (new_bb);
gsi_insert_after (&gsi, load, GSI_NEW_STMT);
e = split_block (new_bb, load);
new_bb = e->dest;
gsi = gsi_start_bb (new_bb);
ref = tmp_load;
x = fold_build2 (reduc->reduction_code,
TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
PHI_RESULT (reduc->new_phi));
name = force_gimple_operand_gsi (&gsi, x, true, NULL_TREE, true,
GSI_CONTINUE_LINKING);
gsi_insert_after (&gsi, gimple_build_omp_atomic_store (name), GSI_NEW_STMT);
return 1;
}
/* Create the atomic operation at the join point of the threads.
REDUCTION_LIST describes the reductions in the LOOP.
LD_ST_DATA describes the shared data structure where
shared data is stored in and loaded from. */
static void
create_call_for_reduction (struct loop *loop,
reduction_info_table_type *reduction_list,
struct clsn_data *ld_st_data)
{
reduction_list->traverse <struct loop *, create_phi_for_local_result> (loop);
/* Find the fallthru edge from GIMPLE_OMP_CONTINUE. */
ld_st_data->load_bb = FALLTHRU_EDGE (loop->latch)->dest;
reduction_list
->traverse <struct clsn_data *, create_call_for_reduction_1> (ld_st_data);
}
/* Callback for htab_traverse. Loads the final reduction value at the
join point of all threads, and inserts it in the right place. */
int
create_loads_for_reductions (reduction_info **slot, struct clsn_data *clsn_data)
{
struct reduction_info *const red = *slot;
gimple stmt;
gimple_stmt_iterator gsi;
tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));
tree load_struct;
tree name;
tree x;
gsi = gsi_after_labels (clsn_data->load_bb);
load_struct = build_simple_mem_ref (clsn_data->load);
load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
NULL_TREE);
x = load_struct;
name = PHI_RESULT (red->keep_res);
stmt = gimple_build_assign (name, x);
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
for (gsi = gsi_start_phis (gimple_bb (red->keep_res));
!gsi_end_p (gsi); gsi_next (&gsi))
if (gsi_stmt (gsi) == red->keep_res)
{
remove_phi_node (&gsi, false);
return 1;
}
gcc_unreachable ();
}
/* Load the reduction result that was stored in LD_ST_DATA.
REDUCTION_LIST describes the list of reductions that the
loads should be generated for. */
static void
create_final_loads_for_reduction (reduction_info_table_type *reduction_list,
struct clsn_data *ld_st_data)
{
gimple_stmt_iterator gsi;
tree t;
gimple stmt;
gsi = gsi_after_labels (ld_st_data->load_bb);
t = build_fold_addr_expr (ld_st_data->store);
stmt = gimple_build_assign (ld_st_data->load, t);
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
reduction_list
->traverse <struct clsn_data *, create_loads_for_reductions> (ld_st_data);
}
/* Callback for htab_traverse. Store the neutral value for the
particular reduction's operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc. into the reduction field.
The reduction is specified in SLOT. The store information is
passed in DATA. */
int
create_stores_for_reduction (reduction_info **slot, struct clsn_data *clsn_data)
{
struct reduction_info *const red = *slot;
tree t;
gimple stmt;
gimple_stmt_iterator gsi;
tree type = TREE_TYPE (gimple_assign_lhs (red->reduc_stmt));
gsi = gsi_last_bb (clsn_data->store_bb);
t = build3 (COMPONENT_REF, type, clsn_data->store, red->field, NULL_TREE);
stmt = gimple_build_assign (t, red->initial_value);
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
return 1;
}
/* Callback for htab_traverse. Creates loads to a field of LOAD in LOAD_BB and
store to a field of STORE in STORE_BB for the ssa name and its duplicate
specified in SLOT. */
int
create_loads_and_stores_for_name (name_to_copy_elt **slot,
struct clsn_data *clsn_data)
{
struct name_to_copy_elt *const elt = *slot;
tree t;
gimple stmt;
gimple_stmt_iterator gsi;
tree type = TREE_TYPE (elt->new_name);
tree load_struct;
gsi = gsi_last_bb (clsn_data->store_bb);
t = build3 (COMPONENT_REF, type, clsn_data->store, elt->field, NULL_TREE);
stmt = gimple_build_assign (t, ssa_name (elt->version));
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
gsi = gsi_last_bb (clsn_data->load_bb);
load_struct = build_simple_mem_ref (clsn_data->load);
t = build3 (COMPONENT_REF, type, load_struct, elt->field, NULL_TREE);
stmt = gimple_build_assign (elt->new_name, t);
gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
return 1;
}
/* Moves all the variables used in LOOP and defined outside of it (including
the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
name) to a structure created for this purpose. The code
while (1)
{
use (a);
use (b);
}
is transformed this way:
bb0:
old.a = a;
old.b = b;
bb1:
a' = new->a;
b' = new->b;
while (1)
{
use (a');
use (b');
}
`old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT. The
pointer `new' is intentionally not initialized (the loop will be split to a
separate function later, and `new' will be initialized from its arguments).
LD_ST_DATA holds information about the shared data structure used to pass
information among the threads. It is initialized here, and
gen_parallel_loop will pass it to create_call_for_reduction that
needs this information. REDUCTION_LIST describes the reductions
in LOOP. */
static void
separate_decls_in_region (edge entry, edge exit,
reduction_info_table_type *reduction_list,
tree *arg_struct, tree *new_arg_struct,
struct clsn_data *ld_st_data)
{
basic_block bb1 = split_edge (entry);
basic_block bb0 = single_pred (bb1);
name_to_copy_table_type name_copies (10);
int_tree_htab_type decl_copies (10);
unsigned i;
tree type, type_name, nvar;
gimple_stmt_iterator gsi;
struct clsn_data clsn_data;
auto_vec<basic_block, 3> body;
basic_block bb;
basic_block entry_bb = bb1;
basic_block exit_bb = exit->dest;
bool has_debug_stmt = false;
entry = single_succ_edge (entry_bb);
gather_blocks_in_sese_region (entry_bb, exit_bb, &body);
FOR_EACH_VEC_ELT (body, i, bb)
{
if (bb != entry_bb && bb != exit_bb)
{
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
separate_decls_in_region_stmt (entry, exit, gsi_stmt (gsi),
&name_copies, &decl_copies);
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
has_debug_stmt = true;
else
separate_decls_in_region_stmt (entry, exit, stmt,
&name_copies, &decl_copies);
}
}
}
/* Now process debug bind stmts. We must not create decls while
processing debug stmts, so we defer their processing so as to
make sure we will have debug info for as many variables as
possible (all of those that were dealt with in the loop above),
and discard those for which we know there's nothing we can
do. */
if (has_debug_stmt)
FOR_EACH_VEC_ELT (body, i, bb)
if (bb != entry_bb && bb != exit_bb)
{
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
{
gimple stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
{
if (separate_decls_in_region_debug (stmt, &name_copies,
&decl_copies))
{
gsi_remove (&gsi, true);
continue;
}
}
gsi_next (&gsi);
}
}
if (name_copies.elements () == 0 && reduction_list->elements () == 0)
{
/* It may happen that there is nothing to copy (if there are only
loop carried and external variables in the loop). */
*arg_struct = NULL;
*new_arg_struct = NULL;
}
else
{
/* Create the type for the structure to store the ssa names to. */
type = lang_hooks.types.make_type (RECORD_TYPE);
type_name = build_decl (UNKNOWN_LOCATION,
TYPE_DECL, create_tmp_var_name (".paral_data"),
type);
TYPE_NAME (type) = type_name;
name_copies.traverse <tree, add_field_for_name> (type);
if (reduction_list && reduction_list->elements () > 0)
{
/* Create the fields for reductions. */
reduction_list->traverse <tree, add_field_for_reduction> (type);
}
layout_type (type);
/* Create the loads and stores. */
*arg_struct = create_tmp_var (type, ".paral_data_store");
nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
*new_arg_struct = make_ssa_name (nvar);
ld_st_data->store = *arg_struct;
ld_st_data->load = *new_arg_struct;
ld_st_data->store_bb = bb0;
ld_st_data->load_bb = bb1;
name_copies
.traverse <struct clsn_data *, create_loads_and_stores_for_name>
(ld_st_data);
/* Load the calculation from memory (after the join of the threads). */
if (reduction_list && reduction_list->elements () > 0)
{
reduction_list
->traverse <struct clsn_data *, create_stores_for_reduction>
(ld_st_data);
clsn_data.load = make_ssa_name (nvar);
clsn_data.load_bb = exit->dest;
clsn_data.store = ld_st_data->store;
create_final_loads_for_reduction (reduction_list, &clsn_data);
}
}
}
/* Returns true if FN was created to run in parallel. */
bool
parallelized_function_p (tree fndecl)
{
cgraph_node *node = cgraph_node::get (fndecl);
gcc_assert (node != NULL);
return node->parallelized_function;
}
/* Creates and returns an empty function that will receive the body of
a parallelized loop. */
static tree
create_loop_fn (location_t loc)
{
char buf[100];
char *tname;
tree decl, type, name, t;
struct function *act_cfun = cfun;
static unsigned loopfn_num;
loc = LOCATION_LOCUS (loc);
snprintf (buf, 100, "%s.$loopfn", current_function_name ());
ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
clean_symbol_name (tname);
name = get_identifier (tname);
type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
decl = build_decl (loc, FUNCTION_DECL, name, type);
TREE_STATIC (decl) = 1;
TREE_USED (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 0;
TREE_PUBLIC (decl) = 0;
DECL_UNINLINABLE (decl) = 1;
DECL_EXTERNAL (decl) = 0;
DECL_CONTEXT (decl) = NULL_TREE;
DECL_INITIAL (decl) = make_node (BLOCK);
t = build_decl (loc, RESULT_DECL, NULL_TREE, void_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_IGNORED_P (t) = 1;
DECL_RESULT (decl) = t;
t = build_decl (loc, PARM_DECL, get_identifier (".paral_data_param"),
ptr_type_node);
DECL_ARTIFICIAL (t) = 1;
DECL_ARG_TYPE (t) = ptr_type_node;
DECL_CONTEXT (t) = decl;
TREE_USED (t) = 1;
DECL_ARGUMENTS (decl) = t;
allocate_struct_function (decl, false);
/* The call to allocate_struct_function clobbers CFUN, so we need to restore
it. */
set_cfun (act_cfun);
return decl;
}
/* Moves the exit condition of LOOP to the beginning of its header, and
duplicates the part of the last iteration that gets disabled to the
exit of the loop. NIT is the number of iterations of the loop
(used to initialize the variables in the duplicated part).
TODO: the common case is that latch of the loop is empty and immediately
follows the loop exit. In this case, it would be better not to copy the
body of the loop, but only move the entry of the loop directly before the
exit check and increase the number of iterations of the loop by one.
This may need some additional preconditioning in case NIT = ~0.
REDUCTION_LIST describes the reductions in LOOP. */
static void
transform_to_exit_first_loop (struct loop *loop,
reduction_info_table_type *reduction_list,
tree nit)
{
basic_block *bbs, *nbbs, ex_bb, orig_header;
unsigned n;
bool ok;
edge exit = single_dom_exit (loop), hpred;
tree control, control_name, res, t;
gphi *phi, *nphi;
gassign *stmt;
gcond *cond_stmt, *cond_nit;
tree nit_1;
split_block_after_labels (loop->header);
orig_header = single_succ (loop->header);
hpred = single_succ_edge (loop->header);
cond_stmt = as_a <gcond *> (last_stmt (exit->src));
control = gimple_cond_lhs (cond_stmt);
gcc_assert (gimple_cond_rhs (cond_stmt) == nit);
/* Make sure that we have phi nodes on exit for all loop header phis
(create_parallel_loop requires that). */
for (gphi_iterator gsi = gsi_start_phis (loop->header);
!gsi_end_p (gsi);
gsi_next (&gsi))
{
phi = gsi.phi ();
res = PHI_RESULT (phi);
t = copy_ssa_name (res, phi);
SET_PHI_RESULT (phi, t);
nphi = create_phi_node (res, orig_header);
add_phi_arg (nphi, t, hpred, UNKNOWN_LOCATION);
if (res == control)
{
gimple_cond_set_lhs (cond_stmt, t);
update_stmt (cond_stmt);
control = t;
}
}
bbs = get_loop_body_in_dom_order (loop);
for (n = 0; bbs[n] != exit->src; n++)
continue;
nbbs = XNEWVEC (basic_block, n);
ok = gimple_duplicate_sese_tail (single_succ_edge (loop->header), exit,
bbs + 1, n, nbbs);
gcc_assert (ok);
free (bbs);
ex_bb = nbbs[0];
free (nbbs);
/* Other than reductions, the only gimple reg that should be copied
out of the loop is the control variable. */
exit = single_dom_exit (loop);
control_name = NULL_TREE;
for (gphi_iterator gsi = gsi_start_phis (ex_bb);
!gsi_end_p (gsi); )
{
phi = gsi.phi ();
res = PHI_RESULT (phi);
if (virtual_operand_p (res))
{
gsi_next (&gsi);
continue;
}
/* Check if it is a part of reduction. If it is,
keep the phi at the reduction's keep_res field. The
PHI_RESULT of this phi is the resulting value of the reduction
variable when exiting the loop. */
if (reduction_list->elements () > 0)
{
struct reduction_info *red;
tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
if (red)
{
red->keep_res = phi;
gsi_next (&gsi);
continue;
}
}
gcc_assert (control_name == NULL_TREE
&& SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
control_name = res;
remove_phi_node (&gsi, false);
}
gcc_assert (control_name != NULL_TREE);
/* Initialize the control variable to number of iterations
according to the rhs of the exit condition. */
gimple_stmt_iterator gsi = gsi_after_labels (ex_bb);
cond_nit = as_a <gcond *> (last_stmt (exit->src));
nit_1 = gimple_cond_rhs (cond_nit);
nit_1 = force_gimple_operand_gsi (&gsi,
fold_convert (TREE_TYPE (control_name), nit_1),
false, NULL_TREE, false, GSI_SAME_STMT);
stmt = gimple_build_assign (control_name, nit_1);
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
}
/* Create the parallel constructs for LOOP as described in gen_parallel_loop.
LOOP_FN and DATA are the arguments of GIMPLE_OMP_PARALLEL.
NEW_DATA is the variable that should be initialized from the argument
of LOOP_FN. N_THREADS is the requested number of threads. Returns the
basic block containing GIMPLE_OMP_PARALLEL tree. */
static basic_block
create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
tree new_data, unsigned n_threads, location_t loc)
{
gimple_stmt_iterator gsi;
basic_block bb, paral_bb, for_bb, ex_bb;
tree t, param;
gomp_parallel *omp_par_stmt;
gimple omp_return_stmt1, omp_return_stmt2;
gimple phi;
gcond *cond_stmt;
gomp_for *for_stmt;
gomp_continue *omp_cont_stmt;
tree cvar, cvar_init, initvar, cvar_next, cvar_base, type;
edge exit, nexit, guard, end, e;
/* Prepare the GIMPLE_OMP_PARALLEL statement. */
bb = loop_preheader_edge (loop)->src;
paral_bb = single_pred (bb);
gsi = gsi_last_bb (paral_bb);
t = build_omp_clause (loc, OMP_CLAUSE_NUM_THREADS);
OMP_CLAUSE_NUM_THREADS_EXPR (t)
= build_int_cst (integer_type_node, n_threads);
omp_par_stmt = gimple_build_omp_parallel (NULL, t, loop_fn, data);
gimple_set_location (omp_par_stmt, loc);
gsi_insert_after (&gsi, omp_par_stmt, GSI_NEW_STMT);
/* Initialize NEW_DATA. */
if (data)
{
gassign *assign_stmt;
gsi = gsi_after_labels (bb);
param = make_ssa_name (DECL_ARGUMENTS (loop_fn));
assign_stmt = gimple_build_assign (param, build_fold_addr_expr (data));
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
assign_stmt = gimple_build_assign (new_data,
fold_convert (TREE_TYPE (new_data), param));
gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);
}
/* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_PARALLEL. */
bb = split_loop_exit_edge (single_dom_exit (loop));
gsi = gsi_last_bb (bb);
omp_return_stmt1 = gimple_build_omp_return (false);
gimple_set_location (omp_return_stmt1, loc);
gsi_insert_after (&gsi, omp_return_stmt1, GSI_NEW_STMT);
/* Extract data for GIMPLE_OMP_FOR. */
gcc_assert (loop->header == single_dom_exit (loop)->src);
cond_stmt = as_a <gcond *> (last_stmt (loop->header));
cvar = gimple_cond_lhs (cond_stmt);
cvar_base = SSA_NAME_VAR (cvar);
phi = SSA_NAME_DEF_STMT (cvar);
cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
initvar = copy_ssa_name (cvar);
SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
initvar);
cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
gsi = gsi_last_nondebug_bb (loop->latch);
gcc_assert (gsi_stmt (gsi) == SSA_NAME_DEF_STMT (cvar_next));
gsi_remove (&gsi, true);
/* Prepare cfg. */
for_bb = split_edge (loop_preheader_edge (loop));
ex_bb = split_loop_exit_edge (single_dom_exit (loop));
extract_true_false_edges_from_block (loop->header, &nexit, &exit);
gcc_assert (exit == single_dom_exit (loop));
guard = make_edge (for_bb, ex_bb, 0);
single_succ_edge (loop->latch)->flags = 0;
end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU);
for (gphi_iterator gpi = gsi_start_phis (ex_bb);
!gsi_end_p (gpi); gsi_next (&gpi))
{
source_location locus;
tree def;
gphi *phi = gpi.phi ();
gphi *stmt;
stmt = as_a <gphi *> (
SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit)));
def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
locus = gimple_phi_arg_location_from_edge (stmt,
loop_preheader_edge (loop));
add_phi_arg (phi, def, guard, locus);
def = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop));
locus = gimple_phi_arg_location_from_edge (stmt, loop_latch_edge (loop));
add_phi_arg (phi, def, end, locus);
}
e = redirect_edge_and_branch (exit, nexit->dest);
PENDING_STMT (e) = NULL;
/* Emit GIMPLE_OMP_FOR. */
gimple_cond_set_lhs (cond_stmt, cvar_base);
type = TREE_TYPE (cvar);
t = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;
for_stmt = gimple_build_omp_for (NULL, GF_OMP_FOR_KIND_FOR, t, 1, NULL);
gimple_set_location (for_stmt, loc);
gimple_omp_for_set_index (for_stmt, 0, initvar);
gimple_omp_for_set_initial (for_stmt, 0, cvar_init);
gimple_omp_for_set_final (for_stmt, 0, gimple_cond_rhs (cond_stmt));
gimple_omp_for_set_cond (for_stmt, 0, gimple_cond_code (cond_stmt));
gimple_omp_for_set_incr (for_stmt, 0, build2 (PLUS_EXPR, type,
cvar_base,
build_int_cst (type, 1)));
gsi = gsi_last_bb (for_bb);
gsi_insert_after (&gsi, for_stmt, GSI_NEW_STMT);
SSA_NAME_DEF_STMT (initvar) = for_stmt;
/* Emit GIMPLE_OMP_CONTINUE. */
gsi = gsi_last_bb (loop->latch);
omp_cont_stmt = gimple_build_omp_continue (cvar_next, cvar);
gimple_set_location (omp_cont_stmt, loc);
gsi_insert_after (&gsi, omp_cont_stmt, GSI_NEW_STMT);
SSA_NAME_DEF_STMT (cvar_next) = omp_cont_stmt;
/* Emit GIMPLE_OMP_RETURN for GIMPLE_OMP_FOR. */
gsi = gsi_last_bb (ex_bb);
omp_return_stmt2 = gimple_build_omp_return (true);
gimple_set_location (omp_return_stmt2, loc);
gsi_insert_after (&gsi, omp_return_stmt2, GSI_NEW_STMT);
/* After the above dom info is hosed. Re-compute it. */
free_dominance_info (CDI_DOMINATORS);
calculate_dominance_info (CDI_DOMINATORS);
return paral_bb;
}
/* Generates code to execute the iterations of LOOP in N_THREADS
threads in parallel.
NITER describes number of iterations of LOOP.
REDUCTION_LIST describes the reductions existent in the LOOP. */
static void
gen_parallel_loop (struct loop *loop,
reduction_info_table_type *reduction_list,
unsigned n_threads, struct tree_niter_desc *niter)
{
tree many_iterations_cond, type, nit;
tree arg_struct, new_arg_struct;
gimple_seq stmts;
edge entry, exit;
struct clsn_data clsn_data;
unsigned prob;
location_t loc;
gimple cond_stmt;
unsigned int m_p_thread=2;
/* From
---------------------------------------------------------------------
loop
{
IV = phi (INIT, IV + STEP)
BODY1;
if (COND)
break;
BODY2;
}
---------------------------------------------------------------------
with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
we generate the following code:
---------------------------------------------------------------------
if (MAY_BE_ZERO
|| NITER < MIN_PER_THREAD * N_THREADS)
goto original;
BODY1;
store all local loop-invariant variables used in body of the loop to DATA.
GIMPLE_OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
load the variables from DATA.
GIMPLE_OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
BODY2;
BODY1;
GIMPLE_OMP_CONTINUE;
GIMPLE_OMP_RETURN -- GIMPLE_OMP_FOR
GIMPLE_OMP_RETURN -- GIMPLE_OMP_PARALLEL
goto end;
original:
loop
{
IV = phi (INIT, IV + STEP)
BODY1;
if (COND)
break;
BODY2;
}
end:
*/
/* Create two versions of the loop -- in the old one, we know that the
number of iterations is large enough, and we will transform it into the
loop that will be split to loop_fn, the new one will be used for the
remaining iterations. */
/* We should compute a better number-of-iterations value for outer loops.
That is, if we have
for (i = 0; i < n; ++i)
for (j = 0; j < m; ++j)
...
we should compute nit = n * m, not nit = n.
Also may_be_zero handling would need to be adjusted. */
type = TREE_TYPE (niter->niter);
nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
NULL_TREE);
if (stmts)
gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
if (loop->inner)
m_p_thread=2;
else
m_p_thread=MIN_PER_THREAD;
many_iterations_cond =
fold_build2 (GE_EXPR, boolean_type_node,
nit, build_int_cst (type, m_p_thread * n_threads));
many_iterations_cond
= fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
invert_truthvalue (unshare_expr (niter->may_be_zero)),
many_iterations_cond);
many_iterations_cond
= force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
if (stmts)
gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
if (!is_gimple_condexpr (many_iterations_cond))
{
many_iterations_cond
= force_gimple_operand (many_iterations_cond, &stmts,
true, NULL_TREE);
if (stmts)
gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
}
initialize_original_copy_tables ();
/* We assume that the loop usually iterates a lot. */
prob = 4 * REG_BR_PROB_BASE / 5;
loop_version (loop, many_iterations_cond, NULL,
prob, prob, REG_BR_PROB_BASE - prob, true);
update_ssa (TODO_update_ssa);
free_original_copy_tables ();
/* Base all the induction variables in LOOP on a single control one. */
canonicalize_loop_ivs (loop, &nit, true);
/* Ensure that the exit condition is the first statement in the loop. */
transform_to_exit_first_loop (loop, reduction_list, nit);
/* Generate initializations for reductions. */
if (reduction_list->elements () > 0)
reduction_list->traverse <struct loop *, initialize_reductions> (loop);
/* Eliminate the references to local variables from the loop. */
gcc_assert (single_exit (loop));
entry = loop_preheader_edge (loop);
exit = single_dom_exit (loop);
eliminate_local_variables (entry, exit);
/* In the old loop, move all variables non-local to the loop to a structure
and back, and create separate decls for the variables used in loop. */
separate_decls_in_region (entry, exit, reduction_list, &arg_struct,
&new_arg_struct, &clsn_data);
/* Create the parallel constructs. */
loc = UNKNOWN_LOCATION;
cond_stmt = last_stmt (loop->header);
if (cond_stmt)
loc = gimple_location (cond_stmt);
create_parallel_loop (loop, create_loop_fn (loc), arg_struct,
new_arg_struct, n_threads, loc);
if (reduction_list->elements () > 0)
create_call_for_reduction (loop, reduction_list, &clsn_data);
scev_reset ();
/* Cancel the loop (it is simpler to do it here rather than to teach the
expander to do it). */
cancel_loop_tree (loop);
/* Free loop bound estimations that could contain references to
removed statements. */
FOR_EACH_LOOP (loop, 0)
free_numbers_of_iterations_estimates_loop (loop);
}
/* Returns true when LOOP contains vector phi nodes. */
static bool
loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
gphi_iterator gsi;
bool res = true;
for (i = 0; i < loop->num_nodes; i++)
for (gsi = gsi_start_phis (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
if (TREE_CODE (TREE_TYPE (PHI_RESULT (gsi.phi ()))) == VECTOR_TYPE)
goto end;
res = false;
end:
free (bbs);
return res;
}
/* Create a reduction_info struct, initialize it with REDUC_STMT
and PHI, insert it to the REDUCTION_LIST. */
static void
build_new_reduction (reduction_info_table_type *reduction_list,
gimple reduc_stmt, gphi *phi)
{
reduction_info **slot;
struct reduction_info *new_reduction;
gcc_assert (reduc_stmt);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file,
"Detected reduction. reduction stmt is: \n");
print_gimple_stmt (dump_file, reduc_stmt, 0, 0);
fprintf (dump_file, "\n");
}
new_reduction = XCNEW (struct reduction_info);
new_reduction->reduc_stmt = reduc_stmt;
new_reduction->reduc_phi = phi;
new_reduction->reduc_version = SSA_NAME_VERSION (gimple_phi_result (phi));
new_reduction->reduction_code = gimple_assign_rhs_code (reduc_stmt);
slot = reduction_list->find_slot (new_reduction, INSERT);
*slot = new_reduction;
}
/* Callback for htab_traverse. Sets gimple_uid of reduc_phi stmts. */
int
set_reduc_phi_uids (reduction_info **slot, void *data ATTRIBUTE_UNUSED)
{
struct reduction_info *const red = *slot;
gimple_set_uid (red->reduc_phi, red->reduc_version);
return 1;
}
/* Detect all reductions in the LOOP, insert them into REDUCTION_LIST. */
static void
gather_scalar_reductions (loop_p loop, reduction_info_table_type *reduction_list)
{
gphi_iterator gsi;
loop_vec_info simple_loop_info;
simple_loop_info = vect_analyze_loop_form (loop);
for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
affine_iv iv;
tree res = PHI_RESULT (phi);
bool double_reduc;
if (virtual_operand_p (res))
continue;
if (!simple_iv (loop, loop, res, &iv, true)
&& simple_loop_info)
{
gimple reduc_stmt = vect_force_simple_reduction (simple_loop_info,
phi, true,
&double_reduc);
if (reduc_stmt && !double_reduc)
build_new_reduction (reduction_list, reduc_stmt, phi);
}
}
destroy_loop_vec_info (simple_loop_info, true);
/* As gimple_uid is used by the vectorizer in between vect_analyze_loop_form
and destroy_loop_vec_info, we can set gimple_uid of reduc_phi stmts
only now. */
reduction_list->traverse <void *, set_reduc_phi_uids> (NULL);
}
/* Try to initialize NITER for code generation part. */
static bool
try_get_loop_niter (loop_p loop, struct tree_niter_desc *niter)
{
edge exit = single_dom_exit (loop);
gcc_assert (exit);
/* We need to know # of iterations, and there should be no uses of values
defined inside loop outside of it, unless the values are invariants of
the loop. */
if (!number_of_iterations_exit (loop, exit, niter, false))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, " FAILED: number of iterations not known\n");
return false;
}
return true;
}
/* Try to initialize REDUCTION_LIST for code generation part.
REDUCTION_LIST describes the reductions. */
static bool
try_create_reduction_list (loop_p loop,
reduction_info_table_type *reduction_list)
{
edge exit = single_dom_exit (loop);
gphi_iterator gsi;
gcc_assert (exit);
gather_scalar_reductions (loop, reduction_list);
for (gsi = gsi_start_phis (exit->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
struct reduction_info *red;
imm_use_iterator imm_iter;
use_operand_p use_p;
gimple reduc_phi;
tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);
if (!virtual_operand_p (val))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "phi is ");
print_gimple_stmt (dump_file, phi, 0, 0);
fprintf (dump_file, "arg of phi to exit: value ");
print_generic_expr (dump_file, val, 0);
fprintf (dump_file, " used outside loop\n");
fprintf (dump_file,
" checking if it a part of reduction pattern: \n");
}
if (reduction_list->elements () == 0)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" FAILED: it is not a part of reduction.\n");
return false;
}
reduc_phi = NULL;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
{
if (!gimple_debug_bind_p (USE_STMT (use_p))
&& flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
{
reduc_phi = USE_STMT (use_p);
break;
}
}
red = reduction_phi (reduction_list, reduc_phi);
if (red == NULL)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" FAILED: it is not a part of reduction.\n");
return false;
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "reduction phi is ");
print_gimple_stmt (dump_file, red->reduc_phi, 0, 0);
fprintf (dump_file, "reduction stmt is ");
print_gimple_stmt (dump_file, red->reduc_stmt, 0, 0);
}
}
}
/* The iterations of the loop may communicate only through bivs whose
iteration space can be distributed efficiently. */
for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
tree def = PHI_RESULT (phi);
affine_iv iv;
if (!virtual_operand_p (def) && !simple_iv (loop, loop, def, &iv, true))
{
struct reduction_info *red;
red = reduction_phi (reduction_list, phi);
if (red == NULL)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
" FAILED: scalar dependency between iterations\n");
return false;
}
}
}
return true;
}
/* Detect parallel loops and generate parallel code using libgomp
primitives. Returns true if some loop was parallelized, false
otherwise. */
static bool
parallelize_loops (void)
{
unsigned n_threads = flag_tree_parallelize_loops;
bool changed = false;
struct loop *loop;
struct tree_niter_desc niter_desc;
struct obstack parloop_obstack;
HOST_WIDE_INT estimated;
source_location loop_loc;
/* Do not parallelize loops in the functions created by parallelization. */
if (parallelized_function_p (cfun->decl))
return false;
if (cfun->has_nonlocal_label)
return false;
gcc_obstack_init (&parloop_obstack);
reduction_info_table_type reduction_list (10);
init_stmt_vec_info_vec ();
FOR_EACH_LOOP (loop, 0)
{
reduction_list.empty ();
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Trying loop %d as candidate\n",loop->num);
if (loop->inner)
fprintf (dump_file, "loop %d is not innermost\n",loop->num);
else
fprintf (dump_file, "loop %d is innermost\n",loop->num);
}
/* If we use autopar in graphite pass, we use its marked dependency
checking results. */
if (flag_loop_parallelize_all && !loop->can_be_parallel)
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "loop is not parallel according to graphite\n");
continue;
}
if (!single_dom_exit (loop))
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "loop is !single_dom_exit\n");
continue;
}
if (/* And of course, the loop must be parallelizable. */
!can_duplicate_loop_p (loop)
|| loop_has_blocks_with_irreducible_flag (loop)
|| (loop_preheader_edge (loop)->src->flags & BB_IRREDUCIBLE_LOOP)
/* FIXME: the check for vector phi nodes could be removed. */
|| loop_has_vector_phi_nodes (loop))
continue;
estimated = estimated_stmt_executions_int (loop);
if (estimated == -1)
estimated = max_stmt_executions_int (loop);
/* FIXME: Bypass this check as graphite doesn't update the
count and frequency correctly now. */
if (!flag_loop_parallelize_all
&& ((estimated != -1
&& estimated <= (HOST_WIDE_INT) n_threads * MIN_PER_THREAD)
/* Do not bother with loops in cold areas. */
|| optimize_loop_nest_for_size_p (loop)))
continue;
if (!try_get_loop_niter (loop, &niter_desc))
continue;
if (!try_create_reduction_list (loop, &reduction_list))
continue;
if (!flag_loop_parallelize_all
&& !loop_parallel_p (loop, &parloop_obstack))
continue;
changed = true;
if (dump_file && (dump_flags & TDF_DETAILS))
{
if (loop->inner)
fprintf (dump_file, "parallelizing outer loop %d\n",loop->header->index);
else
fprintf (dump_file, "parallelizing inner loop %d\n",loop->header->index);
loop_loc = find_loop_location (loop);
if (loop_loc != UNKNOWN_LOCATION)
fprintf (dump_file, "\nloop at %s:%d: ",
LOCATION_FILE (loop_loc), LOCATION_LINE (loop_loc));
}
gen_parallel_loop (loop, &reduction_list,
n_threads, &niter_desc);
}
free_stmt_vec_info_vec ();
obstack_free (&parloop_obstack, NULL);
/* Parallelization will cause new function calls to be inserted through
which local variables will escape. Reset the points-to solution
for ESCAPED. */
if (changed)
pt_solution_reset (&cfun->gimple_df->escaped);
return changed;
}
/* Parallelization. */
namespace {
const pass_data pass_data_parallelize_loops =
{
GIMPLE_PASS, /* type */
"parloops", /* name */
OPTGROUP_LOOP, /* optinfo_flags */
TV_TREE_PARALLELIZE_LOOPS, /* tv_id */
( PROP_cfg | PROP_ssa ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_parallelize_loops : public gimple_opt_pass
{
public:
pass_parallelize_loops (gcc::context *ctxt)
: gimple_opt_pass (pass_data_parallelize_loops, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *) { return flag_tree_parallelize_loops > 1; }
virtual unsigned int execute (function *);
}; // class pass_parallelize_loops
unsigned
pass_parallelize_loops::execute (function *fun)
{
if (number_of_loops (fun) <= 1)
return 0;
if (parallelize_loops ())
{
fun->curr_properties &= ~(PROP_gimple_eomp);
return TODO_update_ssa;
}
return 0;
}
} // anon namespace
gimple_opt_pass *
make_pass_parallelize_loops (gcc::context *ctxt)
{
return new pass_parallelize_loops (ctxt);
}
|
GB_bitmap_assign_IxJ_template.c | //------------------------------------------------------------------------------
// GB_bitmap_assign_IxJ_template: iterate over all of C(I,J)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Iterate over all positions in the IxJ Cartesian product. This is all
// entries C(i,j) where i is in the list I and j is in the list J. This
// traversal occurs whether or not C(i,j) is an entry present in C.
// The C matrix is accessed at C(I,J). The A matrix is size |I|-by-|J|.
// For bitmap assignent, C(I,J)=A is being computed. For bitmap extraction,
// C=A(I,J) so the roles of A and C are swapped (see GB_bitmap_subref.c).
{
//--------------------------------------------------------------------------
// create the tasks to iterate over IxJ
//--------------------------------------------------------------------------
int ntasks = 0, nthreads ;
GB_task_struct *TaskList = NULL ; size_t TaskList_size = 0 ;
GB_OK (GB_subassign_IxJ_slice (&TaskList, &TaskList_size, &ntasks,
&nthreads, /* I, */ nI, /* Ikind, Icolon, J, */ nJ,
/* Jkind, Jcolon, */ Context)) ;
//--------------------------------------------------------------------------
// iterate over all IxJ
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
int64_t kfirst = TaskList [taskid].kfirst ;
int64_t klast = TaskList [taskid].klast ;
int64_t task_cnvals = 0 ;
bool fine_task = (klast == -1) ;
int64_t iA_start = 0, iA_end = nI ;
if (fine_task)
{
// a fine task operates on a slice of a single vector
klast = kfirst ;
iA_start = TaskList [taskid].pA ;
iA_end = TaskList [taskid].pA_end ;
}
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t jA = kfirst ; jA <= klast ; jA++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, jA, Jkind, Jcolon) ;
int64_t pC0 = jC * vlen ; // first entry in C(:,jC)
int64_t pA0 = jA * nI ; // first entry in A(:,jA)
//------------------------------------------------------------------
// operate on C (I(iA_start,iA_end-1),jC)
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
int64_t pC = iC + pC0 ;
int64_t pA = iA + pA0 ;
// operate on C(iC,jC) at pC (if C is bitmap or full)
// and A(iA,jA) or M(iA,jA) at pA, if A and/or M are
// bitmap or full. M(iA,jA) is accessed only for the
// subassign method when M is bitmap or full.
GB_IXJ_WORK (pC, pA) ;
}
}
cnvals += task_cnvals ;
}
//--------------------------------------------------------------------------
// free workpace
//--------------------------------------------------------------------------
GB_FREE_WORK (&TaskList, TaskList_size) ;
}
|
GB_unop__identity_fp32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_uint64)
// op(A') function: GB (_unop_tran__identity_fp32_uint64)
// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp32_uint64)
(
float *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% John Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickPixelPacket
target[3],
zero;
RectangleInfo
bounds;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetMagickPixelPacket(image,&target[0]);
image_view=AcquireCacheView(image);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
&target[0]);
GetMagickPixelPacket(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
&target[1]);
GetMagickPixelPacket(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view),
&target[2]);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
RectangleInfo
bounding_box;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((x < bounding_box.x) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
const ChannelType channel,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
id;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=GetOpenMPMaximumThreads();
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (id=0; id < (ssize_t) number_threads; id++)
current_depth[id]=1;
if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
{
register const PixelPacket
*restrict p;
register ssize_t
i;
p=image->colormap;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
if (status == MagickFalse)
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickStatusType
status;
QuantumAny
range;
status=0;
range=GetQuantumRange(current_depth[id]);
if ((channel & RedChannel) != 0)
status|=GetPixelRed(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelRed(p),
range),range);
if ((channel & GreenChannel) != 0)
status|=GetPixelGreen(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelGreen(p),
range),range);
if ((channel & BlueChannel) != 0)
status|=GetPixelBlue(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelBlue(p),
range),range);
if (status == 0)
break;
current_depth[id]++;
}
p++;
}
depth=current_depth[0];
for (id=1; id < (ssize_t) number_threads; id++)
if (depth < current_depth[id])
depth=current_depth[id];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickStatusType
status;
QuantumAny
range;
status=0;
range=GetQuantumRange(current_depth[id]);
if ((channel & RedChannel) != 0)
status|=GetPixelRed(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelRed(p),range),range);
if ((channel & GreenChannel) != 0)
status|=GetPixelGreen(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelGreen(p),range),range);
if ((channel & BlueChannel) != 0)
status|=GetPixelBlue(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelBlue(p),range),range);
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
status|=GetPixelOpacity(p) != ScaleAnyToQuantum(
ScaleQuantumToAny(GetPixelOpacity(p),range),range);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
status|=GetPixelIndex(indexes+x) !=
ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelIndex(indexes+
x),range),range);
if (status == 0)
break;
current_depth[id]++;
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (id=1; id < (ssize_t) number_threads; id++)
if (depth < current_depth[id])
depth=current_depth[id];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
static inline double MagickMin(const double x,const double y)
{
if (x < y)
return(x);
return(y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double)
MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IsMonochromeImage(image,exception) != MagickFalse)
return(BilevelType);
if (IsGrayImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IsPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(MagickTrue);
if (IsRGBColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
type=GrayscaleType;
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
((Image *) image)->type=type;
if ((type == GrayscaleType) && (image->matte != MagickFalse))
((Image *) image)->type=GrayscaleMatteType;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register ssize_t
x;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IsRGBColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsMonochromePixel(p) == MagickFalse)
{
type=UndefinedType;
break;
}
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
((Image *) image)->type=type;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireCacheView(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth)
{
return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
const ChannelType channel,const size_t depth)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (GetImageDepth(image,&image->exception) <= (size_t)
MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH))
{
image->depth=depth;
return(MagickTrue);
}
/*
Scale pixels to desired depth.
*/
status=MagickTrue;
range=GetQuantumRange(depth);
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelRed(q),range),range));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelGreen(q),range),range));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelBlue(q),range),range));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelOpacity(q),range),range));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,ScaleAnyToQuantum(ScaleQuantumToAny(
GetPixelIndex(indexes+x),range),range));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelPacket
*restrict p;
p=image->colormap;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
p->red=ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),range);
if ((channel & GreenChannel) != 0)
p->green=ScaleAnyToQuantum(ScaleQuantumToAny(p->green,range),range);
if ((channel & BlueChannel) != 0)
p->blue=ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),range);
if ((channel & OpacityChannel) != 0)
p->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,range),
range);
p++;
}
}
image->depth=depth;
return(status);
}
|
GB_binop__bxor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bxor_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int64)
// C=scalar+B GB (_bind1st__bxor_int64)
// C=scalar+B' GB (_bind1st_tran__bxor_int64)
// C=A+scalar GB (_bind2nd__bxor_int64)
// C=A'+scalar GB (_bind2nd_tran__bxor_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_INT64 || GxB_NO_BXOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bxor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxor_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxor_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bxor_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bxor_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxor_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) ^ (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxor_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) ^ (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
GrB_Info GB (_bind1st_tran__bxor_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
GrB_Info GB (_bind2nd_tran__bxor_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Graph.h | /******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
* ******************************************************************************/
#include <cstring>
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <unistd.h>
#include <cstdlib>
#include <sys/time.h>
#include <parallel/algorithm>
#include <omp.h>
#include <cassert>
namespace GraphMat {
inline double sec(struct timeval start, struct timeval end)
{
return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec))))/1.0e6;
}
template<class T>
void AddFn(T a, T b, T* c, void* vsp) {
*c = a + b ;
}
template <class V, class E=int>
class Graph {
public:
int nvertices;
long long int nnz;
bool vertexpropertyowner;
int tiles_per_dim;
int num_threads;
GraphMat::SpMat<GraphMat::DCSCTile<E> > *A;
GraphMat::SpMat<GraphMat::DCSCTile<E> > *AT;
GraphMat::SpVec<GraphMat::DenseSegment<V> > * vertexproperty;
GraphMat::SpVec<GraphMat::DenseSegment<bool> > * active;
public:
Graph(): nvertices(0), nnz(0), vertexpropertyowner(true),
tiles_per_dim(GraphMat::get_global_nrank()),
A(nullptr), AT(nullptr), num_threads(omp_get_max_threads()),
vertexproperty(nullptr), active(nullptr) {}
void ReadEdgelist(GraphMat::edgelist_t<E> A_edges);
void getVertexEdgelist(GraphMat::edgelist_t<V> & myedges);
void getEdgelist(GraphMat::edgelist_t<E> & myedges);
void ReadMTX(const char* filename, bool binary, bool header, bool weights);
void ReadGraphMatBin(const char* filename);
void WriteGraphMatBin(const char* filename);
void setAllActive();
void setAllInactive();
void setActive(int v);
void setInactive(int v);
void setAllVertexproperty(const V& val);
void setVertexproperty(int v, const V& val);
V getVertexproperty(int v) const;
bool vertexNodeOwner(const int v) const;
void saveVertexproperty(std::string fname, bool includeHeader=true) const;
void reset();
void shareVertexProperty(Graph<V,E>& g);
int getNumberOfVertices() const;
void applyToAllVertices(void (*ApplyFn)(V, V*, void*), void* param=nullptr);
template<class T> void applyReduceAllVertices(T* val, void (*ApplyFn)(V*, T*, void*), void (*ReduceFn)(T,T,T*,void*)=AddFn<T>, void* param=nullptr);
~Graph();
private:
int vertexToNative(int vertex, int nsegments, int len) const;
int nativeToVertex(int vertex, int nsegments, int len) const;
};
template<class V, class E>
int Graph<V,E>::vertexToNative(int vertex, int nsegments, int len) const
{
if (true) {
int v = vertex-1;
int npartitions = num_threads * 16 * nsegments;
int height = len / npartitions;
int vmax = height * npartitions;
if(v >= vmax)
{
return v+1;
}
int col = v%npartitions;
int row = v/npartitions;
return row + col * height+ 1;
} else {
return vertex;
}
}
template<class V, class E>
int Graph<V,E>::nativeToVertex(int vertex, int nsegments, int len) const
{
if (true) {
int v = vertex-1;
int npartitions = num_threads * 16 * nsegments;
int height = len / npartitions;
int vmax = height * npartitions;
if(v >= vmax)
{
return v+1;
}
int col = v/height;
int row = v%height;
return col + row * npartitions+ 1;
} else {
return vertex;
}
}
template<class V, class E>
void Graph<V,E>::ReadGraphMatBin(const char* filename) {
std::stringstream fname_ss;
fname_ss << filename << GraphMat::get_global_myrank();
std::cout << "Reading file " << fname_ss.str() << std::endl;
std::ifstream ifilestream(fname_ss.str().c_str(), std::ios::in|std::ios::binary);
boost::archive::binary_iarchive bi(ifilestream);
struct timeval start, end;
gettimeofday(&start, 0);
bi >> A;
bi >> AT;
tiles_per_dim = GraphMat::get_global_nrank();
if(A->ntiles_x != tiles_per_dim || A->ntiles_y != tiles_per_dim ||
AT->ntiles_x != tiles_per_dim || AT->ntiles_y != tiles_per_dim) {
std::cout << "Error reading file - mismatch in number of MPI ranks used in load vs save graph" << std::endl;
exit(1);
}
bi >> num_threads;
if(num_threads != omp_get_max_threads()) {
std::cout << "Error reading file - mismatch in number of OpenMP threads used in load vs save graph" << std::endl;
exit(1);
}
nvertices = A->m;
vertexproperty = new GraphMat::SpVec<GraphMat::DenseSegment<V> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
V *__v = new V;
vertexproperty->setAll(*__v);
delete __v;
active = new GraphMat::SpVec<GraphMat::DenseSegment<bool> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
active->setAll(false);
vertexpropertyowner = true;
nnz = A->getNNZ();
gettimeofday(&end, 0);
std::cout << "Finished GraphMat read + construction, time: " << sec(start,end) << std::endl;
ifilestream.close();
MPI_Barrier(MPI_COMM_WORLD);
}
template<class V, class E>
void Graph<V,E>::WriteGraphMatBin(const char* filename) {
std::stringstream fname_ss;
fname_ss << filename << GraphMat::get_global_myrank();
std::cout << "Writing file " << fname_ss.str() << std::endl;
std::ofstream ofilestream(fname_ss.str().c_str(), std::ios::out|std::ios::binary);
boost::archive::binary_oarchive bo(ofilestream);
bo << A;
bo << AT;
bo << num_threads;
ofilestream.close();
MPI_Barrier(MPI_COMM_WORLD);
}
template<class V, class E>
void Graph<V,E>::ReadEdgelist(GraphMat::edgelist_t<E> A_edges) {
struct timeval start, end;
gettimeofday(&start, 0);
tiles_per_dim = GraphMat::get_global_nrank();
num_threads = omp_get_max_threads();
#pragma omp parallel for
for(int i = 0 ; i < A_edges.nnz ; i++)
{
A_edges.edges[i].src = vertexToNative(A_edges.edges[i].src, tiles_per_dim, A_edges.m);
A_edges.edges[i].dst = vertexToNative(A_edges.edges[i].dst, tiles_per_dim, A_edges.m);
}
A = new GraphMat::SpMat<GraphMat::DCSCTile<E> >(A_edges, tiles_per_dim, tiles_per_dim, GraphMat::partition_fn_2d);
GraphMat::Transpose(A, &AT, tiles_per_dim, tiles_per_dim, GraphMat::partition_fn_2d);
int m_ = A->m;
assert(A->m == A->n);
nnz = A->getNNZ();
vertexproperty = new GraphMat::SpVec<GraphMat::DenseSegment<V> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
V *__v = new V;
vertexproperty->setAll(*__v);
delete __v;
active = new GraphMat::SpVec<GraphMat::DenseSegment<bool> >(A->m, tiles_per_dim, GraphMat::vector_partition_fn);
active->setAll(false);
nvertices = m_;
vertexpropertyowner = true;
gettimeofday(&end, 0);
std::cout << "Finished GraphMat read + construction, time: " << sec(start,end) << std::endl;
}
template<class V, class E>
void Graph<V,E>::ReadMTX(const char* filename, bool binary, bool header, bool weights) {
GraphMat::edgelist_t<E> A_edges;
GraphMat::load_edgelist(filename, &A_edges, binary, header, weights);// binary format, header and edge weights
if (A_edges.m != A_edges.n) {
auto maxn = std::max(A_edges.m, A_edges.n);
A_edges.m = maxn;
A_edges.n = maxn;
}
ReadEdgelist(A_edges);
A_edges.clear();
}
template<class V, class E>
void Graph<V,E>::setAllActive() {
active->setAll(true);
}
template<class V, class E>
void Graph<V,E>::setAllInactive() {
active->setAll(false);
int global_myrank = GraphMat::get_global_myrank();
for(int segmentId = 0 ; segmentId < active->nsegments ; segmentId++)
{
if(active->nodeIds[segmentId] == global_myrank)
{
GraphMat::DenseSegment<bool>* s1 = active->segments[segmentId];
GraphMat::clear_dense_segment(s1->properties->value, s1->properties->bit_vector, s1->num_ints);
}
}
}
template<class V, class E>
void Graph<V,E>::setActive(int v) {
int v_new = vertexToNative(v, tiles_per_dim, nvertices);
active->set(v_new, true);
}
template<class V, class E>
void Graph<V,E>::setInactive(int v) {
int v_new = vertexToNative(v, tiles_per_dim, nvertices);
active->set(v_new, false);
}
template<class V, class E>
void Graph<V,E>::reset() {
setAllInactive();
V v;
vertexproperty->setAll(v);
}
template<class V, class E>
void Graph<V,E>::shareVertexProperty(Graph<V,E>& g) {
if (vertexproperty != nullptr) delete vertexproperty;
vertexproperty = g.vertexproperty;
vertexpropertyowner = false;
}
template<class V, class E>
void Graph<V,E>::setAllVertexproperty(const V& val) {
vertexproperty->setAll(val);
}
template<class V, class E>
void Graph<V,E>::setVertexproperty(int v, const V& val) {
int v_new = vertexToNative(v, tiles_per_dim, nvertices);
vertexproperty->set(v_new, val);
}
template<class V, class E>
void Graph<V,E>::getVertexEdgelist(GraphMat::edgelist_t<V> & myedges) {
vertexproperty->get_edges(&myedges);
for(unsigned int i = 0 ; i < myedges.nnz ; i++)
{
myedges.edges[i].src = nativeToVertex(myedges.edges[i].src, tiles_per_dim, nvertices);
}
}
template<class V, class E>
void Graph<V,E>::getEdgelist(GraphMat::edgelist_t<E> & myedges) {
A->get_edges(&myedges);
for(unsigned int i = 0 ; i < myedges.nnz ; i++)
{
myedges.edges[i].src = nativeToVertex(myedges.edges[i].src, tiles_per_dim, nvertices);
}
}
template<class V, class E>
void Graph<V,E>::saveVertexproperty(std::string fname, bool includeHeader) const {
GraphMat::edgelist_t<V> myedges;
vertexproperty->get_edges(&myedges);
for(unsigned int i = 0 ; i < myedges.nnz ; i++)
{
myedges.edges[i].src = nativeToVertex(myedges.edges[i].src, tiles_per_dim, nvertices);
}
GraphMat::SpVec<GraphMat::DenseSegment<V> > * vertexproperty2 = new GraphMat::SpVec<GraphMat::DenseSegment<V> >(nvertices, tiles_per_dim, GraphMat::vector_partition_fn);
vertexproperty2->ingestEdgelist(myedges);
myedges.clear();
vertexproperty2->save(fname, includeHeader);
delete vertexproperty2;
}
template<class V, class E>
bool Graph<V,E>::vertexNodeOwner(const int v) const {
int v_new = vertexToNative(v, tiles_per_dim, nvertices);
return vertexproperty->node_owner(v_new);
}
template<class V, class E>
V Graph<V,E>::getVertexproperty(const int v) const {
V vp ;
int v_new = vertexToNative(v, tiles_per_dim, nvertices);
vertexproperty->get(v_new, &vp);
return vp;
}
template<class V, class E>
int Graph<V,E>::getNumberOfVertices() const {
return nvertices;
}
template<class V, class E>
void Graph<V,E>::applyToAllVertices( void (*ApplyFn)(V, V*, void*), void* param) {
GraphMat::Apply(vertexproperty, vertexproperty, ApplyFn, param);
}
template<class V, class E>
template<class T>
void Graph<V,E>::applyReduceAllVertices(T* val, void (*ApplyFn)(V*, T*, void*), void (*ReduceFn)(T,T,T*,void*), void* param) {
GraphMat::MapReduce(vertexproperty, val, ApplyFn, ReduceFn, param);
}
template<class V, class E>
Graph<V,E>::~Graph() {
if (A != nullptr) {
delete A;
A = nullptr;
}
if (AT != nullptr) {
delete AT;
AT = nullptr;
}
if (vertexpropertyowner) {
if (vertexproperty != nullptr) {
delete vertexproperty;
vertexproperty = nullptr;
}
}
if (active != nullptr) {
delete active;
active = nullptr;
}
}
} //namespace GraphMat
|
private-clauseModificado2.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
main()
{
int i, n = 7;
int a[n], suma;
for (i=0; i<n; i++)
a[i] = i;
printf("-- suma_antes_de_parallel = %i\n", suma);
#pragma omp parallel
{
suma=0;
#pragma omp for
for (i=0; i<n; i++){
suma = suma + a[i];
printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
}
printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
}
printf("\n");
printf("-- suma = %i\n", suma);
} |
lagrangian_particle_utilities.h | /*
==============================================================================
KratosTestApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2010
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: rrossi $
// Date: $Date: 2007-03-06 10:30:31 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED )
#define KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED
#define PRESSURE_ON_EULERIAN_MESH
#define USE_FEW_PARTICLES
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/cfd_variables.h"
#include "utilities/geometry_utilities.h"
#include "geometries/tetrahedra_3d_4.h"
#include "incompressible_fluid_application.h"
#include "spatial_containers/spatial_containers.h"
#include "utilities/timer.h"
#include "processes/node_erase_process.h"
#include "utilities/binbased_fast_point_locator.h"
namespace Kratos
{
template< class T, std::size_t dim >
class DistanceCalculator
{
public:
double operator()(T const& p1, T const& p2)
{
double dist = 0.0;
for (std::size_t i = 0; i < dim; i++)
{
double tmp = p1[i] - p2[i];
dist += tmp*tmp;
}
return dist; //square distance because it is easier to work without the square root//
}
};
template<std::size_t TDim> class LagrangianParticleUtils
{
public:
KRATOS_CLASS_POINTER_DEFINITION(LagrangianParticleUtils<TDim>);
//**********************************************************************************************
//**********************************************************************************************
void BackAndForth(array_1d<double, 3 > & body_force, const double density, const double dt, const double subdivisions, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart)
{
KRATOS_TRY
//clear lagrangian model par and reseed it
rLagrangianModelPart.Nodes().clear();
Reseed(rEulerianModelPart, rLagrangianModelPart);
double density_inverse = 1.0 / density;
//defintions for spatial search
typedef Node < 3 > PointType;
typedef Node < 3 > ::Pointer PointTypePointer;
typedef std::vector<PointType::Pointer> PointVector;
typedef std::vector<PointType::Pointer>::iterator PointIterator;
typedef std::vector<double> DistanceVector;
typedef std::vector<double>::iterator DistanceIterator;
//creating an auxiliary list for the new nodes
PointVector list_of_nodes;
//*************
// Bucket types
typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
// typedef Bins< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > StaticBins;
// typedef BinsDynamic< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > DynamicBins;
//*************
// DynamicBins;
typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree;
// typedef Tree< OCTreePartition<BucketType> > tree; //Octree;
// typedef Tree< StaticBins > tree; //Binstree;
// typedef Tree< KDTreePartition<StaticBins> > tree; //KdtreeBins;
// typedef typename KdtreeBins::Partitions SubPartitions;
// typedef Tree< OCTreePartition<StaticBins> > tree; //OctreeBins;
/*
typedef Bins< TDim, PointType, stdPointVector> stdBins;
typedef Tree< Bins<TDim,PointType,stdPointVector> > tree; //stdStaticBins;*/
//starting calculating time of construction of the kdtree
boost::timer kdtree_construction;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
PointTypePointer pnode = *(node_it.base());
pnode->Set(TO_ERASE, true);
node_it->GetValue(IS_VISITED) = 0;
//putting the nodes of the destination_model part in an auxiliary list
list_of_nodes.push_back(pnode);
// //reset the position to the position at the end of the step
// array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1);
// (node_it)->FastGetSolutionStepValue(DISPLACEMENT) = old_disp;
//
// (node_it)->X() = (node_it)->X0() + old_disp[0];
// (node_it)->Y() = (node_it)->Y0() + old_disp[1];
// (node_it)->Z() = (node_it)->Z0() + old_disp[2];
}
std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl;
//work arrays
Node < 3 > work_point(0, 0.0, 0.0, 0.0);
unsigned int MaximumNumberOfResults = 10000;
PointVector Results(MaximumNumberOfResults);
DistanceVector SquaredResultsDistances(MaximumNumberOfResults);
array_1d<double, TDim + 1 > N; //Shape functions vector//
array_1d<double, TDim + 1 > pressures; //Shape functions vector//
boost::numeric::ublas::bounded_matrix<double, TDim + 1, TDim> DN_DX;
array_1d<double, TDim> gradp;
array_1d<double, 3 > acc_particle;
array_1d<double, 3 > veulerian;
//create a spatial database with the list of new nodes
unsigned int bucket_size = 20;
double small_dt = dt / subdivisions;
for (unsigned int substep = 0; substep < subdivisions; substep++)
{
//compute the tree with the position of the nodes
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//loop over all of the elements in the eulerian mesh to perform the interpolation
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >&geom = el_it->GetGeometry();
//find the center and "radius" of the element
double xc, yc, zc, radius;
CalculateCenterAndSearchRadius(geom, xc, yc, zc, radius, N);
work_point.X() = xc;
work_point.Y() = yc;
work_point.Z() = zc;
//find all of the new nodes within the radius
int number_of_points_in_radius;
//look between the new nodes which of them is inside the radius of the circumscribed cyrcle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(),
SquaredResultsDistances.begin(), MaximumNumberOfResults);
if (number_of_points_in_radius > 0)
{
//check if inside
for (PointIterator it_found = Results.begin(); it_found != Results.begin() + number_of_points_in_radius; it_found++)
{
bool is_inside = false;
is_inside = CalculatePosition(geom, (*it_found)->X(), (*it_found)->Y(), (*it_found)->Z(), N);
if (is_inside == true && (*it_found)->GetValue(IS_VISITED) == 0)
{
// KRATOS_WATCH("219")
(*it_found)->GetValue(IS_VISITED) = 1;
//move according to the streamline
noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1);
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3 > & disp = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT);
noalias(disp) -= small_dt*veulerian;
array_1d<double, 3 > & vel_particle = (*it_found)->FastGetSolutionStepValue(VELOCITY);
noalias(vel_particle) = veulerian;
}
}
}
}
//position is to be updated only after all of the searches!
// std::cout << "substep= " << substep << (rLagrangianModelPart.NodesBegin()+200)->FastGetSolutionStepValue(DISPLACEMENT) << std::endl;
for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin();
it != rLagrangianModelPart.NodesEnd(); it++)
{
noalias(it->Coordinates()) = it->GetInitialPosition();
noalias(it->Coordinates()) += it->FastGetSolutionStepValue(DISPLACEMENT);
(it)->GetValue(IS_VISITED) = 0;
}
}
//now go forth (computing the acceleration)
for (unsigned int substep = 0; substep < subdivisions; substep++)
{
//compute the tree with the position of the nodes
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//loop over all of the elements in the eulerian mesh to perform the interpolation
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >&geom = el_it->GetGeometry();
//find the center and "radius" of the element
double xc, yc, zc, radius;
CalculateCenterAndSearchRadius(geom, xc, yc, zc, radius, N);
work_point.X() = xc;
work_point.Y() = yc;
work_point.Z() = zc;
//find all of the new nodes within the radius
int number_of_points_in_radius;
//look between the new nodes which of them is inside the radius of the circumscribed cyrcle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(),
SquaredResultsDistances.begin(), MaximumNumberOfResults);
if (number_of_points_in_radius > 0)
{
//check if inside
for (PointIterator it_found = Results.begin(); it_found != Results.begin() + number_of_points_in_radius; it_found++)
{
bool is_inside = false;
is_inside = CalculatePosition(geom, (*it_found)->X(), (*it_found)->Y(), (*it_found)->Z(), N);
if (is_inside == true && (*it_found)->GetValue(IS_VISITED) == 0)
{
(*it_found)->GetValue(IS_VISITED) = 1;
//move according to the streamline
noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1);
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3 > & disp = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT);
noalias(disp) += small_dt*veulerian;
//compute particle velocity
noalias(acc_particle) = body_force - N[0] * geom[0].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse;
for (unsigned int k = 1; k < geom.size(); k++)
noalias(acc_particle) -= N[k] * geom[k].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse;
array_1d<double, 3 > & vel_particle = (*it_found)->FastGetSolutionStepValue(VELOCITY);
noalias(vel_particle) += small_dt*acc_particle;
}
}
}
}
//position is to be updated only after all of the searches!
// std::cout << "substep= " << substep << (rLagrangianModelPart.NodesBegin()+200)->FastGetSolutionStepValue(DISPLACEMENT) << std::endl;
for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin();
it != rLagrangianModelPart.NodesEnd(); it++)
{
noalias(it->Coordinates()) = it->GetInitialPosition();
noalias(it->Coordinates()) += it->FastGetSolutionStepValue(DISPLACEMENT);
(it)->GetValue(IS_VISITED) = 0;
}
}
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
void ConvectParticles(const double dt, const double subdivisions, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, bool use_eulerian_velocity)
{
KRATOS_TRY
//defintions for spatial search
typedef Node < 3 > PointType;
typedef Node < 3 > ::Pointer PointTypePointer;
typedef std::vector<PointType::Pointer> PointVector;
typedef std::vector<PointType::Pointer>::iterator PointIterator;
typedef std::vector<double> DistanceVector;
typedef std::vector<double>::iterator DistanceIterator;
//creating an auxiliary list for the new nodes
PointVector list_of_nodes;
//*************
// Bucket types
typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
// typedef Bins< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > StaticBins;
// typedef BinsDynamic< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > DynamicBins;
//*************
// DynamicBins;
typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree;
// typedef Tree< OCTreePartition<BucketType> > tree; //Octree;
// typedef Tree< StaticBins > tree; //Binstree;
// typedef Tree< KDTreePartition<StaticBins> > tree; //KdtreeBins;
// typedef typename KdtreeBins::Partitions SubPartitions;
// typedef Tree< OCTreePartition<StaticBins> > tree; //OctreeBins;
/*
typedef Bins< TDim, PointType, stdPointVector> stdBins;
typedef Tree< Bins<TDim,PointType,stdPointVector> > tree; //stdStaticBins;*/
//starting calculating time of construction of the kdtree
boost::timer kdtree_construction;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
PointTypePointer pnode = *(node_it.base());
pnode->Set(TO_ERASE, true);
node_it->GetValue(IS_VISITED) = 0;
//putting the nodes of the destination_model part in an auxiliary list
list_of_nodes.push_back(pnode);
//reset the position to the position at the end of the step
array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1);
(node_it)->FastGetSolutionStepValue(DISPLACEMENT) = old_disp;
(node_it)->X() = (node_it)->X0() + old_disp[0];
(node_it)->Y() = (node_it)->Y0() + old_disp[1];
(node_it)->Z() = (node_it)->Z0() + old_disp[2];
}
std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl;
//work arrays
Node < 3 > work_point(0, 0.0, 0.0, 0.0);
unsigned int MaximumNumberOfResults = 10000;
PointVector Results(MaximumNumberOfResults);
DistanceVector SquaredResultsDistances(MaximumNumberOfResults);
array_1d<double, TDim + 1 > N; //Shape functions vector//
array_1d<double, TDim + 1 > pressures; //Shape functions vector//
boost::numeric::ublas::bounded_matrix<double, TDim + 1, TDim> DN_DX;
//create a spatial database with the list of new nodes
unsigned int bucket_size = 20;
double small_dt = dt / subdivisions;
for (unsigned int substep = 0; substep < subdivisions; substep++)
{
//compute the tree with the position of the nodes
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//loop over all of the elements in the eulerian mesh to perform the interpolation
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >&geom = el_it->GetGeometry();
//find the center and "radius" of the element
double xc, yc, zc, radius;
CalculateCenterAndSearchRadius(geom, xc, yc, zc, radius, N);
work_point.X() = xc;
work_point.Y() = yc;
work_point.Z() = zc;
//find all of the new nodes within the radius
int number_of_points_in_radius;
//look between the new nodes which of them is inside the radius of the circumscribed cyrcle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(),
SquaredResultsDistances.begin(), MaximumNumberOfResults);
array_1d<double, 3 > veulerian;
if (number_of_points_in_radius > 0)
{
//check if inside
for (PointIterator it_found = Results.begin(); it_found != Results.begin() + number_of_points_in_radius; it_found++)
{
bool is_inside = false;
is_inside = CalculatePosition(geom, (*it_found)->X(), (*it_found)->Y(), (*it_found)->Z(), N);
if (is_inside == true && (*it_found)->GetValue(IS_VISITED) == 0)
{
(*it_found)->GetValue(IS_VISITED) = 1;
//move according to the streamline
noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1);
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3 > & disp = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT);
noalias(disp) += small_dt*veulerian;
(*it_found)->Set(TO_ERASE, false);
if (substep == 0 && use_eulerian_velocity == true)
{
double temperature = N[0] * geom[0].FastGetSolutionStepValue(TEMPERATURE);
for (unsigned int k = 1; k < geom.size(); k++)
temperature += N[k] * geom[k].FastGetSolutionStepValue(TEMPERATURE);
(*it_found)->FastGetSolutionStepValue(TEMPERATURE) = temperature;
}
}
}
}
}
//position is to be updated only after all of the searches!
// std::cout << "substep= " << substep << (rLagrangianModelPart.NodesBegin()+200)->FastGetSolutionStepValue(DISPLACEMENT) << std::endl;
for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin();
it != rLagrangianModelPart.NodesEnd(); it++)
{
noalias(it->Coordinates()) = it->GetInitialPosition();
noalias(it->Coordinates()) += it->FastGetSolutionStepValue(DISPLACEMENT);
(it)->GetValue(IS_VISITED) = 0;
}
}
//perform the erase
NodeEraseProcess(rLagrangianModelPart).Execute();
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
void StreamlineMove(array_1d<double, 3 > & body_force, const double density, const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, bool use_eulerian_velocity, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
//should be done outside!!!
// BinBasedFastPointLocator<TDim> node_locator(rEulerianModelPart);
// node_locator.UpdateSearchDatabase();
double density_inverse = 1.0 / density;
//reset particle position to the beginning of the step
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
Node < 3 > ::Pointer pnode = *(node_it.base());
pnode->Set(TO_ERASE, true);
node_it->GetValue(IS_VISITED) = 0;
//reset the position to the position at the end of the step
const array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1);
noalias((node_it)->FastGetSolutionStepValue(DISPLACEMENT)) = old_disp;
const array_1d<double, 3 > & old_vel = (node_it)->FastGetSolutionStepValue(VELOCITY, 1);
noalias((node_it)->FastGetSolutionStepValue(VELOCITY)) = old_vel;
(node_it)->X() = (node_it)->X0() + old_disp[0];
(node_it)->Y() = (node_it)->Y0() + old_disp[1];
(node_it)->Z() = (node_it)->Z0() + old_disp[2];
}
//KRATOS_WATCH("539")
array_1d<double, 3 > veulerian;
array_1d<double, 3 > acc_particle;
array_1d<double, TDim + 1 > N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
//KRATOS_WATCH("551")
#pragma omp parallel for firstprivate(results,N,veulerian,acc_particle)
for (int i = 0; i < nparticles; i++)
{
unsigned int substep = 0;
unsigned int subdivisions = 1;
double small_dt = dt;
while(substep++ < subdivisions)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
(iparticle)->Set(TO_ERASE, true);
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
// KRATOS_WATCH("561")
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
// KRATOS_WATCH("564")
if (is_found == true)
{
(pparticle)->GetValue(IS_VISITED) = 1;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
noalias(veulerian) = N[0] * geom[0].FastGetSolutionStepValue(VELOCITY, 1);
for (unsigned int k = 1; k < geom.size(); k++)
noalias(veulerian) += N[k] * geom[k].FastGetSolutionStepValue(VELOCITY, 1);
//compute adaptive subdivisions
if(substep == 1)
{
//compute h
double h = N[0] * geom[0].FastGetSolutionStepValue(NODAL_H);
for (unsigned int k = 1; k < geom.size(); k++)
h += N[k] * geom[k].FastGetSolutionStepValue(NODAL_H);
//compute number of subdivisions needed
const unsigned int min_subdivisions = 3;
const unsigned int max_subdivisions = 20;
double v = norm_2(veulerian);
double subdivisions = double(floor(2*dt*v/h));
subdivisions = (subdivisions<min_subdivisions) ? min_subdivisions : (subdivisions>max_subdivisions) ? max_subdivisions : subdivisions;
//compute subdivisions time step
small_dt = dt / subdivisions;
//KRATOS_WATCH(subdivisions)
}
//move according to the streamline
array_1d<double, 3 > & disp = (iparticle)->FastGetSolutionStepValue(DISPLACEMENT);
noalias(disp) += small_dt*veulerian;
(pparticle)->Set(TO_ERASE, false);
// KRATOS_WATCH("585")
//compute particle velocity
noalias(acc_particle) = body_force - N[0] * geom[0].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse;
for (unsigned int k = 1; k < geom.size(); k++)
noalias(acc_particle) -= N[k] * geom[k].FastGetSolutionStepValue(PRESS_PROJ) * density_inverse;
array_1d<double, 3 > & force_particle = (iparticle)->FastGetSolutionStepValue(FORCE);
noalias(force_particle) = ZeroVector(3);
for (unsigned int k = 0; k < geom.size(); k++)
{
noalias(acc_particle) += N[k] * geom[k].FastGetSolutionStepValue(FORCE) * density_inverse;
force_particle += N[k] * geom[k].FastGetSolutionStepValue(FORCE) * density_inverse;
}
//KRATOS_WATCH("599")
array_1d<double, 3 > & vel_particle = (pparticle)->FastGetSolutionStepValue(VELOCITY);
if (use_eulerian_velocity == true)
{
if (substep == 1)
{
// noalias((*it_found)->FastGetSolutionStepValue(DISPLACEMENT)) = (*it_found)->FastGetSolutionStepValue(DISPLACEMENT,1);
noalias(vel_particle) = veulerian;
noalias((pparticle)->FastGetSolutionStepValue(VELOCITY, 1)) = veulerian;
}
}
noalias(vel_particle) += small_dt*acc_particle;
//update position
noalias(iparticle->Coordinates()) = iparticle->GetInitialPosition();
noalias(iparticle->Coordinates()) += iparticle->FastGetSolutionStepValue(DISPLACEMENT);
(iparticle)->GetValue(IS_VISITED) = 0;
//KRATOS_WATCH("619")
}
}
}
//KRATOS_WATCH("622")
//erase nodes whose velocity is far inconsistent with the displacement increment (typically nodes that get stuck to the wall)
// for (ModelPart::NodesContainerType::iterator it = rLagrangianModelPart.NodesBegin();
// it != rLagrangianModelPart.NodesEnd(); it++)
// {
// if (it->Is(TO_ERASE)!= true)
// {
// array_1d<double,3> delta_disp = it->FastGetSolutionStepValue (DISPLACEMENT);
// noalias (delta_disp) -= it->FastGetSolutionStepValue (DISPLACEMENT,1);
// double norm_delta_disp = norm_2 (delta_disp);
// // array_1d<double,3> avg_vel = it->FastGetSolutionStepValue(VELOCITY);
// // avg_vel += it->FastGetSolutionStepValue(VELOCITY,1);
// // avg_vel *= 0.5;
// // double norm_v = norm_2(avg_vel);
// array_1d<double,3> v_old = it->FastGetSolutionStepValue (VELOCITY,1);
// double norm_v = norm_2 (v_old);
// if (norm_delta_disp*3.0 < norm_v*dt )
// it->Set(TO_ERASE, true);
// if (norm_delta_disp*0.333333333333333 > norm_v*dt )
// it->Set(TO_ERASE, true);
// }
// }
//perform the erase
// int nparticles_before_erase = rLagrangianModelPart.Nodes().size();
//NodeEraseProcess(rLagrangianModelPart).Execute();
// int nparticles_after_erase = rLagrangianModelPart.Nodes().size();
// std::cout << "n particles erased during streamline move =" << nparticles_after_erase - nparticles_before_erase <<std::endl;
KRATOS_CATCH("")
}
void StreamlineCorrect(const double density, const double dt, ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart,BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
double density_inverse = 1.0 / density;
array_1d<double, 3 > acc_particle;
array_1d<double, TDim + 1 > N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
/* //reset particle position to the beginning of the step
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
Node < 3 > ::Pointer pnode = *(node_it.base());
node_it->GetValue(IS_VISITED) = 0;
//reset the position to the position at the end of the step
const array_1d<double, 3 > & old_disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT, 1);
(node_it)->X() = (node_it)->X0() + old_disp[0];
(node_it)->Y() = (node_it)->Y0() + old_disp[1];
(node_it)->Z() = (node_it)->Z0() + old_disp[2];
}*/
#pragma omp parallel for firstprivate(results,N,acc_particle)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
(pparticle)->GetValue(IS_VISITED) = 1;
Geometry< Node < 3 > >& geom = pelement->GetGeometry();
//correct particle velocity
noalias(acc_particle) = - N[0]* density_inverse * (geom[0].FastGetSolutionStepValue(PRESS_PROJ) - geom[0].FastGetSolutionStepValue(PRESS_PROJ,1) );
for (unsigned int k = 1; k < geom.size(); k++)
noalias(acc_particle) -= N[k]* density_inverse * (geom[k].FastGetSolutionStepValue(PRESS_PROJ) - geom[k].FastGetSolutionStepValue(PRESS_PROJ,1));
array_1d<double, 3 > & vel_particle = (pparticle)->FastGetSolutionStepValue(VELOCITY);
noalias(vel_particle) += dt*acc_particle;
}
}
/* //bring back particles to their position
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
Node < 3 > ::Pointer pnode = *(node_it.base());
//reset the position to the position at the end of the step
const array_1d<double, 3 > & disp = (node_it)->FastGetSolutionStepValue(DISPLACEMENT);
(node_it)->X() = (node_it)->X0() + disp[0];
(node_it)->Y() = (node_it)->Y0() + disp[1];
(node_it)->Z() = (node_it)->Z0() + disp[2];
}*/
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
//function to seed a list of new nodes
void Reseed(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart)
{
KRATOS_TRY;
unsigned int id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
rLagrangianModelPart.Nodes().clear();
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin();
node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
int node_id = id++;
double x = node_it->X();
double y = node_it->Y();
double z = node_it->Z();
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, x, y, z);
pnode->FastGetSolutionStepValue(VELOCITY) = node_it->FastGetSolutionStepValue(VELOCITY);
}
#ifdef USE_FEW_PARTICLES
boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > pos;
boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > N;
#else
boost::numeric::ublas::bounded_matrix<double, 16, 3 > pos;
boost::numeric::ublas::bounded_matrix<double, 16, 3 > N;
#endif
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
Geometry<Node < 3 > >& geom = el_it->GetGeometry();
ComputeGaussPointPositions(geom, pos, N);
for (unsigned int i = 0; i < pos.size1(); i++)
{
int node_id = id++;
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2));
array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
noalias(vel) = ZeroVector(3);
for (unsigned int j = 0; j < TDim + 1; j++)
noalias(vel) += N(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY);
}
}
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
node_it->FastGetSolutionStepValue(VELOCITY, 1) = node_it->FastGetSolutionStepValue(VELOCITY);
}
KRATOS_CATCH("");
}
//**********************************************************************************************
//**********************************************************************************************
//function to seed a list of new nodes
void ReseedEmptyElements(ModelPart& rEulerianModelPart, ModelPart& rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator,
int min_number_of_particles, int max_number_of_particles)
{
KRATOS_TRY;
int ninitial_particles = rLagrangianModelPart.Nodes().size();
//generate a tree with the position of the lagrangian nodes
// typedef Node < 3 > PointType;
// typedef Node < 3 > ::Pointer PointTypePointer;
//unsigned int min_number_of_particles = 1;
int id;
if (rLagrangianModelPart.Nodes().size() != 0)
id = (rLagrangianModelPart.NodesEnd() - 1)->Id();
else
id = 1;
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
el_it->SetValue(YOUNG_MODULUS,0.0);
}
for (ModelPart::NodesContainerType::iterator pparticle = rLagrangianModelPart.NodesBegin();
pparticle != rLagrangianModelPart.NodesEnd(); pparticle++)
{
pparticle->Set(TO_ERASE,false);;
}
//count particles that fall within an element
array_1d<double, TDim + 1 > N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
//count particles within an element
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
double& counter = pelement->GetValue(YOUNG_MODULUS);
#pragma omp atomic
counter += 1.0;
}
}
//erase particles within elements for which reseeding is needed
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
double& counter = pelement->GetValue(YOUNG_MODULUS);
if(counter < min_number_of_particles)
pparticle->Set(TO_ERASE,true);
else if(counter > max_number_of_particles) //delete if there are too many
{
#pragma omp atomic
counter -= 1;
pparticle->Set(TO_ERASE,true);
}
}
}
//perform the erase
NodeEraseProcess(rLagrangianModelPart).Execute();
int nafter_erase_particles = rLagrangianModelPart.Nodes().size();
std::cout << "n particles erased during reseed =" << nafter_erase_particles - ninitial_particles <<std::endl;
//now do reseed
#ifdef USE_FEW_PARTICLES
boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > pos;
boost::numeric::ublas::bounded_matrix<double, TDim + 2, TDim + 1 > Nnew;
#else
boost::numeric::ublas::bounded_matrix<double, 16, 3 > pos;
boost::numeric::ublas::bounded_matrix<double, 16, 3 > Nnew;
#endif
//if there are less than the number of particles we decide, reseed the element
for (ModelPart::ElementsContainerType::iterator el_it = rEulerianModelPart.ElementsBegin();
el_it != rEulerianModelPart.ElementsEnd(); el_it++)
{
if (el_it->GetValue(YOUNG_MODULUS) < min_number_of_particles)
{
Geometry< Node<3> >& geom = el_it->GetGeometry();
ComputeGaussPointPositions(geom, pos, Nnew);
for (unsigned int i = 0; i < pos.size1(); i++)
{
int node_id = id++;
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode(node_id, pos(i, 0), pos(i, 1), pos(i, 2));
array_1d<double, 3 > & vel = pnode->FastGetSolutionStepValue(VELOCITY);
noalias(vel) = ZeroVector(3);
for (unsigned int j = 0; j < TDim + 1; j++)
noalias(vel) += Nnew(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY);
array_1d<double, 3 > & vel_old = pnode->FastGetSolutionStepValue(VELOCITY, 1);
noalias(vel_old) = ZeroVector(3);
for (unsigned int j = 0; j < TDim + 1; j++)
noalias(vel_old) += Nnew(i, j) * geom[j].FastGetSolutionStepValue(VELOCITY, 1);
}
}
}
int nfinal_particles = rLagrangianModelPart.Nodes().size();
std::cout << "n particles added during reseed =" << nfinal_particles - ninitial_particles <<std::endl;
KRATOS_CATCH("");
}
//**********************************************************************************************
//**********************************************************************************************
void VisualizationModelPart(ModelPart& rCompleteModelPart, ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY;
rCompleteModelPart.Elements() = rEulerianModelPart.Elements();
rCompleteModelPart.Nodes() = rEulerianModelPart.Nodes();
unsigned int id;
if(rEulerianModelPart.Nodes().size()!= 0)
id = (rEulerianModelPart.Nodes().end() - 1)->Id() + 1;
else
id = 1;
//preallocate the memory needed
int tot_nodes = rEulerianModelPart.Nodes().size() + rLagrangianModelPart.Nodes().size();
rCompleteModelPart.Nodes().reserve( tot_nodes );
//note that here we renumber the nodes
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); node_it++)
{
rCompleteModelPart.AddNode(*(node_it.base()));
node_it->SetId(id++);
}
KRATOS_CATCH("");
}
//**********************************************************************************************
//**********************************************************************************************
void TransferToEulerianMesh(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart)
{
KRATOS_TRY
//defintions for spatial search
typedef Node < 3 > PointType;
typedef Node < 3 > ::Pointer PointTypePointer;
typedef std::vector<PointType::Pointer> PointVector;
typedef std::vector<PointType::Pointer>::iterator PointIterator;
typedef std::vector<double> DistanceVector;
typedef std::vector<double>::iterator DistanceIterator;
//creating an auxiliary list for the new nodes
PointVector list_of_nodes;
//*************
// Bucket types
typedef Bucket< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > BucketType;
// typedef Bins< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > StaticBins;
// typedef BinsDynamic< TDim, PointType, PointVector, PointTypePointer, PointIterator, DistanceIterator > DynamicBins;
//*************
// DynamicBins;
typedef Tree< KDTreePartition<BucketType> > tree; //Kdtree;
// typedef Tree< OCTreePartition<BucketType> > tree; //Octree;
// typedef Tree< StaticBins > tree; //Binstree;
// typedef Tree< KDTreePartition<StaticBins> > tree; //KdtreeBins;
// typedef typename KdtreeBins::Partitions SubPartitions;
// typedef Tree< OCTreePartition<StaticBins> > tree; //OctreeBins;
/*
typedef Bins< TDim, PointType, stdPointVector> stdBins;
typedef Tree< Bins<TDim,PointType,stdPointVector> > tree; //stdStaticBins;*/
//starting calculating time of construction of the kdtree
boost::timer kdtree_construction;
for (ModelPart::NodesContainerType::iterator node_it = rLagrangianModelPart.NodesBegin();
node_it != rLagrangianModelPart.NodesEnd(); ++node_it)
{
PointTypePointer pnode = *(node_it.base());
//putting the nodes of the destination_model part in an auxiliary list
list_of_nodes.push_back(pnode);
}
std::cout << "kdt constructin time " << kdtree_construction.elapsed() << std::endl;
//create a spatial database with the list of new nodes
unsigned int bucket_size = 20;
tree nodes_tree(list_of_nodes.begin(), list_of_nodes.end(), bucket_size);
//work arrays
Node < 3 > work_point(0, 0.0, 0.0, 0.0);
unsigned int MaximumNumberOfResults = 10000;
PointVector Results(MaximumNumberOfResults);
DistanceVector SquaredResultsDistances(MaximumNumberOfResults);
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(NODAL_H) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
double sigma = 0.0;
if (TDim == 2)
sigma = 10.0 / (7.0 * 3.1415926);
else
sigma = 1.0 / 3.1415926;
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin();
node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
work_point.X() = node_it->X();
work_point.Y() = node_it->Y();
work_point.Z() = node_it->Z();
double radius = 0.6 * node_it->FastGetSolutionStepValue(NODAL_H);
//find all of the new nodes within the radius
int number_of_points_in_radius;
//look between the new nodes which of them is inside the radius of the circumscribed cyrcle
number_of_points_in_radius = nodes_tree.SearchInRadius(work_point, radius, Results.begin(),
SquaredResultsDistances.begin(), MaximumNumberOfResults);
if (number_of_points_in_radius > 0)
{
array_1d<double, 3 > & vel = (node_it)->FastGetSolutionStepValue(VELOCITY);
double& temperature = (node_it)->FastGetSolutionStepValue(TEMPERATURE);
array_1d<double, 3 > original_vel = vel;
double original_temperature = temperature;
noalias(vel) = ZeroVector(3);
temperature = 0.0;
double tot_weight = 0.0;
for (int k = 0; k < number_of_points_in_radius; k++)
{
// double weight = 1.0;
double distance = sqrt(*(SquaredResultsDistances.begin() + k));
double weight = SPHCubicKernel(sigma, distance, radius);
// KRATOS_WATCH(weight);
// double weight = 1.0 / (sqrt(SquaredResultsDistances[k]) + 1e-9);
tot_weight += weight;
// tot_weight += 1.0;
PointIterator it_found = Results.begin() + k;
// array_1d<double,3> aux = (*it_found)->Coordinates()-node_it->Coordinates();
// KRATOS_WATCH(norm_2(aux));
// KRATOS_WATCH( *(SquaredResultsDistances.begin()+k) );
const array_1d<double, 3 > particle_velocity = (*it_found)->FastGetSolutionStepValue(VELOCITY);
const double particle_temperature = (*it_found)->FastGetSolutionStepValue(TEMPERATURE);
noalias(vel) += weight * particle_velocity;
temperature += weight * particle_temperature;
}
vel /= tot_weight;
temperature /= tot_weight;
if (node_it->IsFixed(VELOCITY_X))
{
noalias(vel) = original_vel;
}
if (node_it->IsFixed(TEMPERATURE))
temperature = original_temperature;
}
else
{
if (node_it->IsFixed(VELOCITY_X))
node_it->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
}
}
KRATOS_CATCH("")
}
//**********************************************************************************************
//**********************************************************************************************
void TransferToEulerianMeshShapeBased(ModelPart& rEulerianModelPart, ModelPart & rLagrangianModelPart, BinBasedFastPointLocator<TDim>& node_locator)
{
KRATOS_TRY
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(FORCE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----FORCE---- variable!!!!!! ERROR", "");
if (rEulerianModelPart.NodesBegin()->SolutionStepsDataHas(TEMPERATURE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add ----TEMPERATURE---- variable!!!!!! ERROR", "");
//defintions for spatial search
// typedef Node < 3 > PointType;
// typedef Node < 3 > ::Pointer PointTypePointer;
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin();
node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
if (node_it->IsFixed(VELOCITY_X) == false)
{
(node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3);
(node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0;
(node_it)->GetValue(YOUNG_MODULUS) = 0.0;
}
}
array_1d<double, TDim + 1 > N;
const int max_results = 10000;
typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results);
const int nparticles = rLagrangianModelPart.Nodes().size();
#pragma omp parallel for firstprivate(results,N)
for (int i = 0; i < nparticles; i++)
{
ModelPart::NodesContainerType::iterator iparticle = rLagrangianModelPart.NodesBegin() + i;
Node < 3 > ::Pointer pparticle = *(iparticle.base());
typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin();
Element::Pointer pelement;
bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results);
if (is_found == true)
{
Geometry<Node<3> >& geom = pelement->GetGeometry();
const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY);
const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE);
for (unsigned int k = 0; k < geom.size(); k++)
{
if (geom[k].IsFixed(VELOCITY_X) == false)
{
geom[k].SetLock();
geom[k].FastGetSolutionStepValue(VELOCITY) += N[k] * vel_particle;
geom[k].FastGetSolutionStepValue(TEMPERATURE) += N[k] * temperature_particle;
geom[k].GetValue(YOUNG_MODULUS) += N[k];
geom[k].UnSetLock();
}
}
}
}
for (ModelPart::NodesContainerType::iterator node_it = rEulerianModelPart.NodesBegin();
node_it != rEulerianModelPart.NodesEnd(); node_it++)
{
if (node_it->IsFixed(VELOCITY_X) == false)
{
const double NN = (node_it)->GetValue(YOUNG_MODULUS);
if (NN != 0.0)
{
(node_it)->FastGetSolutionStepValue(VELOCITY) /= NN;
(node_it)->FastGetSolutionStepValue(TEMPERATURE) /= NN;
}
else
{
std::cout << node_it->Id() << " coeff = " << NN << std::endl;
}
}
}
KRATOS_CATCH("")
}
//restarting the step from the beginning
void RestartStep(ModelPart & rModelPart)
{
KRATOS_TRY;
//setting the variables to their value at the beginning of the time step
rModelPart.OverwriteSolutionStepData(1, 0);
//setting the coordinates to their value at the beginning of the step
for (ModelPart::NodesContainerType::iterator node_it = rModelPart.NodesBegin();
node_it != rModelPart.NodesEnd(); node_it++)
{
array_1d<double, 3 > & coords = node_it->Coordinates();
const array_1d<double, 3 > & old_disp = node_it->FastGetSolutionStepValue(DISPLACEMENT, 1);
coords[0] = node_it->X0() + old_disp[0];
coords[1] = node_it->Y0() + old_disp[1];
coords[2] = node_it->Z0() + old_disp[2];
}
KRATOS_CATCH("");
}
private:
inline double SPHCubicKernel(const double sigma, const double r, const double hmax)
{
double h_half = 0.5 * hmax;
const double s = r / h_half;
const double coeff = sigma / pow(h_half, static_cast<int>(TDim));
if (s <= 1.0)
return coeff * (1.0 - 1.5 * s * s + 0.75 * s * s * s);
else if (s <= 2.0)
return 0.25 * coeff * pow(2.0 - s, 3);
else
return 0.0;
}
inline void CalculateCenterAndSearchRadius(Geometry<Node < 3 > >&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double, 3 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
xc = 0.3333333333333333333 * (x0 + x1 + x2);
yc = 0.3333333333333333333 * (y0 + y1 + y2);
zc = 0.0;
double R1 = (xc - x0)*(xc - x0) + (yc - y0)*(yc - y0);
double R2 = (xc - x1)*(xc - x1) + (yc - y1)*(yc - y1);
double R3 = (xc - x2)*(xc - x2) + (yc - y2)*(yc - y2);
R = R1;
if (R2 > R) R = R2;
if (R3 > R) R = R3;
R = 1.01 * sqrt(R);
}
//***************************************
//***************************************
inline void CalculateCenterAndSearchRadius(Geometry<Node < 3 > >&geom,
double& xc, double& yc, double& zc, double& R, array_1d<double, 4 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
xc = 0.25 * (x0 + x1 + x2 + x3);
yc = 0.25 * (y0 + y1 + y2 + y3);
zc = 0.25 * (z0 + z1 + z2 + z3);
double R1 = (xc - x0)*(xc - x0) + (yc - y0)*(yc - y0) + (zc - z0)*(zc - z0);
double R2 = (xc - x1)*(xc - x1) + (yc - y1)*(yc - y1) + (zc - z1)*(zc - z1);
double R3 = (xc - x2)*(xc - x2) + (yc - y2)*(yc - y2) + (zc - z2)*(zc - z2);
double R4 = (xc - x3)*(xc - x3) + (yc - y3)*(yc - y3) + (zc - z3)*(zc - z3);
R = R1;
if (R2 > R) R = R2;
if (R3 > R) R = R3;
if (R4 > R) R = R4;
R = sqrt(R);
}
//***************************************
//***************************************
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
const double xc, const double yc, const double zc,
array_1d<double, 3 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
double inv_area = 0.0;
if (area == 0.0)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
}
else
{
inv_area = 1.0 / area;
}
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true
return true;
return false;
}
//***************************************
//***************************************
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
const double xc, const double yc, const double zc,
array_1d<double, 4 > & N
)
{
double x0 = geom[0].X();
double y0 = geom[0].Y();
double z0 = geom[0].Z();
double x1 = geom[1].X();
double y1 = geom[1].Y();
double z1 = geom[1].Z();
double x2 = geom[2].X();
double y2 = geom[2].Y();
double z2 = geom[2].Z();
double x3 = geom[3].X();
double y3 = geom[3].Y();
double z3 = geom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 0.0000000000001)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
}
else
{
inv_vol = 1.0 / vol;
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
inline double CalculateVol(const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}
//***************************************
//***************************************
inline double CalculateVol(const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3
)
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ * 0.1666666666666666666667;
}
void ComputeGaussPointPositions(Geometry< Node < 3 > >& geom, boost::numeric::ublas::bounded_matrix<double, 4, 3 > & pos, boost::numeric::ublas::bounded_matrix<double, 4, 3 > & N)
{
double one_third = 1.0 / 3.0;
double one_sixt = 1.0 / 6.0;
double two_third = 2.0 * one_third;
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
void ComputeGaussPointPositions(Geometry< Node < 3 > >& geom, boost::numeric::ublas::bounded_matrix<double, 16, 3 > & pos, boost::numeric::ublas::bounded_matrix<double, 16, 3 > & N)
{
//lower diagonal terms
double ypos = 1.0 / 12.0;
int pos_counter = 0;
for (unsigned int i = 0; i < 4; i++)
{
double xpos = 1.0 / 12.0;
for (unsigned int j = 0; j < 4 - i; j++)
{
double N1 = xpos;
double N2 = ypos;
double N3 = 1.0 - xpos - ypos;
pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X();
pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y();
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z();
N(pos_counter, 0) = N1;
N(pos_counter, 1) = N2;
N(pos_counter, 2) = N3;
xpos += 1.0 / 4.0;
pos_counter += 1;
}
ypos += 1.0 / 4.0;
}
//lower diagonal terms
ypos = 2.0 / 12.0;
// pos_counter = 8;
for (unsigned int i = 0; i < 3; i++)
{
double xpos = 2.0 / 12.0;
for (unsigned int j = 0; j < 4 - i; j++)
{
double N1 = xpos;
double N2 = ypos;
double N3 = 1.0 - xpos - ypos;
pos(pos_counter, 0) = N1 * geom[0].X() + N2 * geom[1].X() + N3 * geom[2].X();
pos(pos_counter, 1) = N1 * geom[0].Y() + N2 * geom[1].Y() + N3 * geom[2].Y();
pos(pos_counter, 2) = N1 * geom[0].Z() + N2 * geom[1].Z() + N3 * geom[2].Z();
N(pos_counter, 0) = N1;
N(pos_counter, 1) = N2;
N(pos_counter, 2) = N3;
xpos += 1.0 / 4.0;
pos_counter += 1;
}
ypos += 1.0 / 4.0;
}
}
void ConsistentMassMatrix(const double A, boost::numeric::ublas::bounded_matrix<double, 3, 3 > & M)
{
double c1 = A / 12.0;
double c2 = 2.0 * c1;
M(0, 0) = c2;
M(0, 1) = c1;
M(0, 2) = c1;
M(1, 0) = c1;
M(1, 1) = c2;
M(1, 2) = c1;
M(2, 0) = c1;
M(2, 1) = c1;
M(2, 2) = c2;
}
};
} // namespace Kratos.
#endif // KRATOS_LAGRANGIAN_PARTICLES_UTILITIES_INCLUDED defined
|
ast-dump-openmp-begin-declare-variant_11.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify=c_mode -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify=cxx_mode -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// c_mode-no-diagnostics
#ifdef __cplusplus
#define CONST constexpr
#else
#define CONST __attribute__((const))
#endif
#pragma omp begin declare variant match(implementation = {vendor(llvm)})
CONST int also_after1(void) { // cxx_mode-note {{previous declaration is here}}
return 0;
}
static int also_after2(void) {
return 0;
}
__attribute__((nothrow)) int also_after3(void) {
return 0;
}
static CONST __attribute__((nothrow, always_inline)) __inline__ int also_after4(void) { // cxx_mode-note {{previous declaration is here}}
return 0;
}
#pragma omp end declare variant
int also_after1(void) { // cxx_mode-error {{non-constexpr declaration of 'also_after1' follows constexpr declaration}}
return 1;
}
int also_after2(void) {
return 2;
}
int also_after3(void) {
return 3;
}
int also_after4(void) { // cxx_mode-error {{non-constexpr declaration of 'also_after4' follows constexpr declaration}}
return 4;
}
int main() {
// Should return 0.
return also_after1() + also_after2() + also_after3() + also_after4();
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:27> col:11 implicit used also_after1 'int ({{.*}})'
// C-NEXT: | |-ConstAttr [[ADDR_1:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_2:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_3:0x[a-z0-9]*]] <col:15> 'int ({{.*}})' Function [[ADDR_4:0x[a-z0-9]*]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_4]] <col:15, line:15:1> line:9:15 also_after1[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <line:13:29, line:15:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <line:14:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-ConstAttr [[ADDR_8:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: |-FunctionDecl [[ADDR_9:0x[a-z0-9]*]] <line:16:1, col:28> col:12 implicit used also_after2 'int ({{.*}})' static
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_10:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_11:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_12:0x[a-z0-9]*]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_12]] <col:1, line:18:1> line:16:1 also_after2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// C-NEXT: | `-CompoundStmt [[ADDR_13:0x[a-z0-9]*]] <col:30, line:18:1>
// C-NEXT: | `-ReturnStmt [[ADDR_14:0x[a-z0-9]*]] <line:17:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_15:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: |-FunctionDecl [[ADDR_16:0x[a-z0-9]*]] <line:19:1, col:46> col:30 implicit used also_after3 'int ({{.*}})'
// C-NEXT: | |-NoThrowAttr [[ADDR_17:0x[a-z0-9]*]] <col:16>
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_18:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_20:0x[a-z0-9]*]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_20]] <col:1, line:21:1> line:19:1 also_after3[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:48, line:21:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:20:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-NoThrowAttr [[ADDR_24:0x[a-z0-9]*]] <line:19:16>
// C-NEXT: |-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:22:1, col:85> col:69 implicit used also_after4 'int ({{.*}})' static inline
// C-NEXT: | |-ConstAttr [[ADDR_26:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_27:0x[a-z0-9]*]] <line:22:29>
// C-NEXT: | |-AlwaysInlineAttr [[ADDR_28:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_31:0x[a-z0-9]*]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_31]] <col:1, line:24:1> line:22:1 also_after4[implementation={vendor(llvm)}] 'int ({{.*}})' static inline
// C-NEXT: | |-CompoundStmt [[ADDR_32:0x[a-z0-9]*]] <col:87, line:24:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_33:0x[a-z0-9]*]] <line:23:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_34:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | |-ConstAttr [[ADDR_35:0x[a-z0-9]*]] <line:9:30>
// C-NEXT: | |-NoThrowAttr [[ADDR_36:0x[a-z0-9]*]] <line:22:29>
// C-NEXT: | `-AlwaysInlineAttr [[ADDR_37:0x[a-z0-9]*]] <col:38> always_inline
// C-NEXT: |-FunctionDecl [[ADDR_38:0x[a-z0-9]*]] prev [[ADDR_0]] <line:27:1, line:29:1> line:27:5 used also_after1 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_39:0x[a-z0-9]*]] <col:23, line:29:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_40:0x[a-z0-9]*]] <line:28:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_41:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: | |-ConstAttr [[ADDR_42:0x[a-z0-9]*]] <line:9:30> Inherited
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_43:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_3]] <col:15> 'int ({{.*}})' Function [[ADDR_4]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_44:0x[a-z0-9]*]] prev [[ADDR_9]] <line:30:1, line:32:1> line:30:5 used also_after2 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_45:0x[a-z0-9]*]] <col:23, line:32:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:31:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_47:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_48:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_11]] <line:16:1> 'int ({{.*}})' Function [[ADDR_12]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_49:0x[a-z0-9]*]] prev [[ADDR_16]] <line:33:1, line:35:1> line:33:5 used also_after3 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_50:0x[a-z0-9]*]] <col:23, line:35:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_51:0x[a-z0-9]*]] <line:34:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_52:0x[a-z0-9]*]] <col:10> 'int' 3
// C-NEXT: | |-NoThrowAttr [[ADDR_53:0x[a-z0-9]*]] <line:19:16> Inherited
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_54:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_55:0x[a-z0-9]*]] prev [[ADDR_25]] <line:36:1, line:38:1> line:36:5 used also_after4 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_56:0x[a-z0-9]*]] <col:23, line:38:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_57:0x[a-z0-9]*]] <line:37:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_58:0x[a-z0-9]*]] <col:10> 'int' 4
// C-NEXT: | |-ConstAttr [[ADDR_59:0x[a-z0-9]*]] <line:9:30> Inherited
// C-NEXT: | |-NoThrowAttr [[ADDR_60:0x[a-z0-9]*]] <line:22:29> Inherited
// C-NEXT: | |-AlwaysInlineAttr [[ADDR_61:0x[a-z0-9]*]] <col:38> Inherited always_inline
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_62:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_30]] <col:1> 'int ({{.*}})' Function [[ADDR_31]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-FunctionDecl [[ADDR_63:0x[a-z0-9]*]] <line:41:1, line:44:1> line:41:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_64:0x[a-z0-9]*]] <col:12, line:44:1>
// C-NEXT: `-ReturnStmt [[ADDR_65:0x[a-z0-9]*]] <line:43:3, col:70>
// C-NEXT: `-BinaryOperator [[ADDR_66:0x[a-z0-9]*]] <col:10, col:70> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_67:0x[a-z0-9]*]] <col:10, col:54> 'int' '+'
// C-NEXT: | |-BinaryOperator [[ADDR_68:0x[a-z0-9]*]] <col:10, col:38> 'int' '+'
// C-NEXT: | | |-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:10, col:22> 'int'
// C-NEXT: | | | | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' Function [[ADDR_38]] 'also_after1' 'int ({{.*}})'
// C-NEXT: | | | `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:9:15, line:43:22> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:9:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_3]] <col:15> 'int ({{.*}})' Function [[ADDR_4]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | | `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <line:43:26, col:38> 'int'
// C-NEXT: | | |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:26, col:38> 'int'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' Function [[ADDR_44]] 'also_after2' 'int ({{.*}})'
// C-NEXT: | | `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <line:16:1, line:43:38> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <line:16:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_11]] <col:1> 'int ({{.*}})' Function [[ADDR_12]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: | `-PseudoObjectExpr [[ADDR_81:0x[a-z0-9]*]] <line:43:42, col:54> 'int'
// C-NEXT: | |-CallExpr [[ADDR_82:0x[a-z0-9]*]] <col:42, col:54> 'int'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_83:0x[a-z0-9]*]] <col:42> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_84:0x[a-z0-9]*]] <col:42> 'int ({{.*}})' Function [[ADDR_49]] 'also_after3' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_85:0x[a-z0-9]*]] <line:19:1, line:43:54> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_86:0x[a-z0-9]*]] <line:19:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_19]] <col:1> 'int ({{.*}})' Function [[ADDR_20]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: `-PseudoObjectExpr [[ADDR_87:0x[a-z0-9]*]] <line:43:58, col:70> 'int'
// C-NEXT: |-CallExpr [[ADDR_88:0x[a-z0-9]*]] <col:58, col:70> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_89:0x[a-z0-9]*]] <col:58> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_90:0x[a-z0-9]*]] <col:58> 'int ({{.*}})' Function [[ADDR_55]] 'also_after4' 'int ({{.*}})'
// C-NEXT: `-CallExpr [[ADDR_91:0x[a-z0-9]*]] <line:22:1, line:43:70> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_92:0x[a-z0-9]*]] <line:22:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_30]] <col:1> 'int ({{.*}})' Function [[ADDR_31]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:13:27> col:11 implicit used constexpr also_after1 'int ({{.*}})'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <line:7:15> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_3]] <col:15, line:15:1> line:7:15 constexpr also_after1[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_4:0x[a-z0-9]*]] <line:13:29, line:15:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_5:0x[a-z0-9]*]] <line:14:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_6:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:16:1, col:28> col:12 implicit used also_after2 'int ({{.*}})' static
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:18:1> line:16:1 also_after2[implementation={vendor(llvm)}] 'int ({{.*}})' static
// CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:30, line:18:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:17:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:19:1, col:46> col:30 implicit used also_after3 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_15:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17:0x[a-z0-9]*]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_17]] <col:1, line:21:1> line:19:1 also_after3[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: | `-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:48, line:21:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:20:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:22:1, col:85> col:69 implicit used constexpr also_after4 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-AlwaysInlineAttr [[ADDR_22:0x[a-z0-9]*]] <col:38> always_inline
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_23:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_24:0x[a-z0-9]*]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25:0x[a-z0-9]*]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_25]] <col:1, line:24:1> line:22:1 constexpr also_after4[implementation={vendor(llvm)}] 'int ({{.*}}) __attribute__((nothrow))' static inline
// CXX-NEXT: | |-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:87, line:24:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:23:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-AlwaysInlineAttr [[ADDR_29:0x[a-z0-9]*]] <line:22:38> always_inline
// CXX-NEXT: |-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:27:1, line:29:1> line:27:5 invalid also_after1 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:23, line:29:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:28:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_33:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_34:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_2]] <line:7:15> 'int ({{.*}})' Function [[ADDR_3]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_35:0x[a-z0-9]*]] prev [[ADDR_7]] <line:30:1, line:32:1> line:30:5 used also_after2 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_36:0x[a-z0-9]*]] <col:23, line:32:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_37:0x[a-z0-9]*]] <line:31:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_38:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_39:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:16:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_40:0x[a-z0-9]*]] prev [[ADDR_14]] <line:33:1, line:35:1> line:33:5 used also_after3 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_41:0x[a-z0-9]*]] <col:23, line:35:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_42:0x[a-z0-9]*]] <line:34:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:10> 'int' 3
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_44:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16]] <line:19:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: |-FunctionDecl [[ADDR_45:0x[a-z0-9]*]] <line:36:1, line:38:1> line:36:5 invalid also_after4 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_46:0x[a-z0-9]*]] <col:23, line:38:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_47:0x[a-z0-9]*]] <line:37:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_48:0x[a-z0-9]*]] <col:10> 'int' 4
// CXX-NEXT: | |-AlwaysInlineAttr [[ADDR_49:0x[a-z0-9]*]] <line:22:38> Inherited always_inline
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_50:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_24]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-FunctionDecl [[ADDR_51:0x[a-z0-9]*]] <line:41:1, line:44:1> line:41:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_52:0x[a-z0-9]*]] <col:12, line:44:1>
// CXX-NEXT: `-ReturnStmt [[ADDR_53:0x[a-z0-9]*]] <line:43:3, col:70>
// CXX-NEXT: `-BinaryOperator [[ADDR_54:0x[a-z0-9]*]] <col:10, col:70> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_55:0x[a-z0-9]*]] <col:10, col:54> 'int' '+'
// CXX-NEXT: | |-BinaryOperator [[ADDR_56:0x[a-z0-9]*]] <col:10, col:38> 'int' '+'
// CXX-NEXT: | | |-PseudoObjectExpr [[ADDR_57:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | |-CallExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:22> 'int'
// CXX-NEXT: | | | | `-ImplicitCastExpr [[ADDR_59:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | | `-DeclRefExpr [[ADDR_60:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_after1' 'int ({{.*}})'
// CXX-NEXT: | | | `-CallExpr [[ADDR_61:0x[a-z0-9]*]] <line:7:15, line:43:22> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_62:0x[a-z0-9]*]] <line:7:15> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_2]] <col:15> 'int ({{.*}})' Function [[ADDR_3]] 'also_after1[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | | `-PseudoObjectExpr [[ADDR_63:0x[a-z0-9]*]] <line:43:26, col:38> 'int'
// CXX-NEXT: | | |-CallExpr [[ADDR_64:0x[a-z0-9]*]] <col:26, col:38> 'int'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:26> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_66:0x[a-z0-9]*]] <col:26> 'int ({{.*}})' {{.*}}Function [[ADDR_35]] 'also_after2' 'int ({{.*}})'
// CXX-NEXT: | | `-CallExpr [[ADDR_67:0x[a-z0-9]*]] <line:16:1, line:43:38> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_68:0x[a-z0-9]*]] <line:16:1> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_9]] <col:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after2[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: | `-PseudoObjectExpr [[ADDR_69:0x[a-z0-9]*]] <line:43:42, col:54> 'int'
// CXX-NEXT: | |-CallExpr [[ADDR_70:0x[a-z0-9]*]] <col:42, col:54> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_71:0x[a-z0-9]*]] <col:42> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_72:0x[a-z0-9]*]] <col:42> 'int ({{.*}})' {{.*}}Function [[ADDR_40]] 'also_after3' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_73:0x[a-z0-9]*]] <line:19:1, line:43:54> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_74:0x[a-z0-9]*]] <line:19:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_16]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_17]] 'also_after3[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-PseudoObjectExpr [[ADDR_75:0x[a-z0-9]*]] <line:43:58, col:70> 'int'
// CXX-NEXT: |-CallExpr [[ADDR_76:0x[a-z0-9]*]] <col:58, col:70> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_77:0x[a-z0-9]*]] <col:58> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_78:0x[a-z0-9]*]] <col:58> 'int ({{.*}}) __attribute__((nothrow))' {{.*}}Function [[ADDR_21]] 'also_after4' 'int ({{.*}}) __attribute__((nothrow))'
// CXX-NEXT: `-CallExpr [[ADDR_79:0x[a-z0-9]*]] <line:22:1, line:43:70> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_80:0x[a-z0-9]*]] <line:22:1> 'int (*)({{.*}}) __attribute__((nothrow))' <FunctionToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_24]] <col:1> 'int ({{.*}}) __attribute__((nothrow))' Function [[ADDR_25]] 'also_after4[implementation={vendor(llvm)}]' 'int ({{.*}}) __attribute__((nothrow))'
|
mpi_room_assignment.c |
/* The following C code implements the room-assignment problem
* Written By : Sai Suraj(21560) and Suvam Tamang(21561)
* MTCS - 103(P) Mini - Project
*/
///////////////////////////////////////////////////////////////////// ALL THE HEADER FILES //////////////////////////////////////////////////////////////////////////
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<mpi.h>
///////////////////////////////////////////////////////// ALL THE FUNCTIONS DEFINITIONS AND PROTOTYPE ////////////////////////////////////////////////////////////////
float getRand(int min,int max);
void getMatrix(int**,int);
int check(int *,int );
int contains(int * a, int p, int n);
void genSolution(int * a, int n);
int getCost(int **,int *,int );
int* makeCopy(int *,int );
int getCost(int **,int *,int);
int room_assign(int **,int *,int *,int,int,int,int);
void Free(int *,int *,int **,int );
int getMin(int *a, int size);
int getIndex(int *a,int size);
//////////////////////////////////////////////////////////////////////////// MAIN FUNCTION //////////////////////////////////////////////////////////////////////////
int main(int argc,char *argv[]){
int rank, np;
int cost = 0;
int *aux,*a,**D;
double elapsed_time;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &np);
//Opening the file passed as command line arguments for writing out the results
FILE *fp = fopen(argv[1],"w");
if (fp == NULL){
printf("Error in opening file!\n");
exit(-1);
}
FILE *fq = fopen(argv[2],"a");
if (fp == NULL){
printf("Error in opening file!\n");
exit(-1);
}
int nthreads = atoi(argv[3]);
int *results;
int newCost = 0;
MPI_Barrier(MPI_COMM_WORLD);
elapsed_time = -MPI_Wtime();
// Here n signifies the number of students
for(int n=10; n<=100; n+=2){
//int n = 20;
a = (int *)calloc(n,sizeof(int));
D = (int **)calloc(n,sizeof(int *));
for(int i=0;i<n;i++){
D[i] = (int *)calloc(n,sizeof(int));
}
if ( rank == 0 ){
// Generating the dislike matrix
getMatrix(D,n);
results = (int *)calloc(np,sizeof(int));
}
//Every process generates the initial random distribution
genSolution(a,n);
/*for(int i=0;i<n;i++){
printf("%d ",a[i]);
}
printf("\n");*/
//Making a copy of the input array for further requirements
aux = makeCopy(a,n);
//Broadcasting the Compatibility matrix to other processes
for (int i = 0; i < n; i++){
MPI_Bcast(D[i], n, MPI_INT, 0, MPI_COMM_WORLD);
}
//Calculating the intial cost
cost = getCost(D,a,n);
//Every Process performs the room assignment and calculates the optimal cost it finds as newCost
newCost = room_assign(D,a,aux,cost,n,rank,nthreads);
//Free(a,aux,D,n);
//Gathering the new cost calculated by each process
MPI_Gather(&newCost, 1, MPI_INT, results, 1, MPI_INT, 0, MPI_COMM_WORLD);
//The master process finally writes the results to the output file
if(rank==0){
fprintf(fp,"%d %d %d\n", n, cost , getMin(results, np));
}
/*printf("\n");
int id = getIndex(results,np);
if(rank==id){
for(int i=0;i<n;i++){
printf("%d ",a[i]);
}
}*/
Free(a,aux,D,n);
}
elapsed_time += MPI_Wtime();
if(rank==0){
fprintf(fq,"%d %lf\n",np,elapsed_time);
}
//End of the MPI Program
MPI_Finalize();
fclose(fp);
fclose(fq);
return 0;
}
/* End of the main program */
/* Function Name : getIndex
* Use : Returns the ID of the process containing the minimum value of the cost
*/
int getIndex(int *a,int n){
int min = 0;
for(int j=1;j<n;j++){
if(a[j]<=min){
min = j;
}
}
return min;
}
/* Function Name : getMin
* Use : iterates through the input array and returns the minimum element in it
*/
int getMin(int *a, int size){
int min = a[0];
for(int j = 1; j<size; j++)
if(a[j] <= min)
min = a[j];
return min;
}
/* Function Name : Free
* Use : used for deallocating the storage used for storing arrays and matrices
*/
void Free(int *a,int *aux,int **D,int n){
free(a);
free(aux);
for(int i=0;i<n;i++){
free(D[i]);}
free(D);
}
/* Function Name : getCost
* Use : computes the total cost depending on the configuration of the input array and returns it
*/
int getCost(int **D,int *a,int n){
int cost = 0;
for(int i=0;i<n;i++){
for(int j=0;j<n;j++){
if(a[i] == a[j]){
cost += D[i][j];
}
}
}
return cost;
}
/* Function Name : room_assign
* Use : performs the permutation of students such that it results in the minimum possible cost and returns the cost
*/
int room_assign(int **D,int *a,int *aux,int cost,int n,int rank,int nthreads){
#pragma omp parallel num_threads(nthreads)
{
int temp;
float u1,u2;
int c1,c2;
int t=10;
srand(rank+time(0));
#pragma omp for
for(int i=0;i<1000;i++){
while(1){
u1 = getRand(0,1);
u2 = getRand(0,1);
c1 = floor(u1*n);
c2 = floor(u2*n);
if(a[c1]!=a[c2]) break;
}
temp = aux[c1];
aux[c1] = aux[c2];
aux[c2] = temp;
int newCost = getCost(D,aux,n);
int delta = newCost-cost;
double expo = exp( (double) (-delta)/t);
double u = ((double)rand()/(RAND_MAX));
if (delta < 0 || expo >= u ){
int t = a[c1];
a[c1] = a[c2];
a[c2] = t;
cost = newCost;
i = 0;
}
else{
temp = aux[c2];
aux[c2] = aux[c1];
aux[c1] = temp;
i = i + 1;
}
t = 0.999*t;
}
}
return cost;
}
/* Function Name : getRand
* Use : returns an int between [min, max[
*/
float getRand(int min, int max)
{
return (int) min + (rand() / (((double)RAND_MAX + 1)/ max));
}
/* Function Name : getMatrix
* Use : forms the dislike matrix
*/
void getMatrix(int **D,int size){
int num;
for(int i=0;i<size;i++){
for(int j=i;j<size;j++){
num = rand()%11;
D[i][j]=num;
D[j][i]=num;
}
}
}
/* Function Name : contains
* Use : checking if the input array already contains a certain element
*/
int contains(int * a, int p, int n)
{
for(int i = 0; i<n; i++){
if (a[i] == p) return 1;
}
return 0;
}
/* Function Name : genSolution
* Use : generates a random solution
*/
void genSolution(int * a, int n)
{
for(int i=0; i<n; i++){
if(a[i] == 0)
{
int p = rand() % (n/2) + 1;
if( !contains(a, p, n) )
{
a[i] = p;
a[i+(n/2)] = p;
}
else i--;
}
}
}
/* Function Name : makeCopy
* Use : used for making a copy of the input array and returning it
*/
int* makeCopy(int *a,int n){
int *res = (int *)calloc(n,sizeof(int));
for(int i=0;i<n;i++){
res[i]=a[i];
}
return res;
}
////////////////////////////////////////////////////////////////////////////// END //////////////////////////////////////////////////////////////////////////////////
|
sptree.h | /*
*
* Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
/*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef SPTREE_H
#define SPTREE_H
#include <iostream>
#include <vector>
#include <unordered_map>
#include <map>
#ifdef __APPLE__
#include <dispatch/dispatch.h>
#endif
namespace hdi{
namespace dr{
//! Sparse Partitioning Tree used for the Barnes Hut approximation
/*!
Sparse Partitioning Tree used for the Barnes Hut approximation.
The original version was implemented by Laurens van der Maaten,
\author Laurens van der Maaten
\author Nicola Pezzotti
*/
template <typename scalar_type>
class SPTree{
public:
typedef double hp_scalar_type;
private:
class Cell {
unsigned int _emb_dimension;
hp_scalar_type* corner;
hp_scalar_type* width;
public:
Cell(unsigned int inp__emb_dimension);
Cell(unsigned int inp__emb_dimension, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
~Cell();
hp_scalar_type getCorner(unsigned int d);
hp_scalar_type getWidth(unsigned int d);
void setCorner(unsigned int d, hp_scalar_type val);
void setWidth(unsigned int d, hp_scalar_type val);
bool containsPoint(scalar_type point[]);
};
// Fixed constants
static const unsigned int QT_NODE_CAPACITY = 1;
// A buffer we use when doing force computations
//hp_scalar_type* buff;
// Properties of this node in the tree
SPTree* parent;
unsigned int _emb_dimension;
bool is_leaf;
unsigned int size;
unsigned int cum_size;
// Axis-aligned bounding box stored as a center with half-_emb_dimensions to represent the boundaries of this quad tree
Cell* boundary;
// Indices in this space-partitioning tree node, corresponding center-of-mass, and list of all children
scalar_type* _emb_positions;
hp_scalar_type* _center_of_mass;
unsigned int index[QT_NODE_CAPACITY];
// Children
SPTree** children;
unsigned int no_children;
public:
SPTree(unsigned int D, scalar_type* inp_data, unsigned int N);
private:
SPTree(unsigned int D, scalar_type* inp_data, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
SPTree(unsigned int D, scalar_type* inp_data, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
SPTree(SPTree* inp_parent, unsigned int D, scalar_type* inp_data, unsigned int N, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
SPTree(SPTree* inp_parent, unsigned int D, scalar_type* inp_data, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
public:
~SPTree();
void setData(scalar_type* inp_data);
SPTree* getParent();
void construct(Cell boundary);
bool insert(unsigned int new_index);
void subdivide();
bool isCorrect();
void rebuildTree();
void getAllIndices(unsigned int* indices);
unsigned int getDepth();
void computeNonEdgeForcesOMP(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type& sum_Q)const;
void computeNonEdgeForces(unsigned int point_index, hp_scalar_type theta, hp_scalar_type neg_f[], hp_scalar_type* sum_Q)const;
void computeEdgeForces(unsigned int* row_P, unsigned int* col_P, hp_scalar_type* val_P, hp_scalar_type sum_P, int N, hp_scalar_type* pos_f)const;
template <typename sparse_scalar_matrix>
void computeEdgeForces(const sparse_scalar_matrix& matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const;
void print();
private:
void init(SPTree* inp_parent, unsigned int D, scalar_type* inp_data, hp_scalar_type* inp_corner, hp_scalar_type* inp_width);
void fill(unsigned int N);
unsigned int getAllIndices(unsigned int* indices, unsigned int loc);
bool isChild(unsigned int test_index, unsigned int start, unsigned int end);
};
/////////////////////////////////////////////////////////////////////////
template <typename scalar_type>
template <typename sparse_scalar_matrix>
void SPTree<scalar_type>::computeEdgeForces(const sparse_scalar_matrix& sparse_matrix, hp_scalar_type multiplier, hp_scalar_type* pos_f)const{
const int n = sparse_matrix.size();
// Loop over all edges in the graph
#ifdef __APPLE__
//std::cout << "GCD dispatch, sptree 180.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j) {
#endif //__APPLE__
std::vector<hp_scalar_type> buff(_emb_dimension,0);
unsigned int ind1, ind2;
hp_scalar_type q_ij_1;
ind1 = j * _emb_dimension;
for(auto elem: sparse_matrix[j]) {
// Compute pairwise distance and Q-value
q_ij_1 = 1.0;
ind2 = elem.first * _emb_dimension;
for(unsigned int d = 0; d < _emb_dimension; d++)
buff[d] = _emb_positions[ind1 + d] - _emb_positions[ind2 + d]; //buff contains (yi-yj) per each _emb_dimension
for(unsigned int d = 0; d < _emb_dimension; d++)
q_ij_1 += buff[d] * buff[d];
hp_scalar_type p_ij = elem.second;
hp_scalar_type res = hp_scalar_type(p_ij) * multiplier / q_ij_1 / n;
// Sum positive force
for(unsigned int d = 0; d < _emb_dimension; d++)
pos_f[ind1 + d] += res * buff[d] * multiplier; //(p_ij*q_j*mult) * (yi-yj)
}
}
#ifdef __APPLE__
);
#endif
}
}
}
#endif
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
int check_results(double* A){
for (int i = 0 ; i < N ; i++){
if (A[i] != TRIALS){
printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
return 0;
}
}
return 1;
}
int check_results_priv(double *A, double *B){
for(int i = 0 ; i < N ; i++) {
if (A[i] != TRIALS*3) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]);
return 0;
}
if (B[i] != TRIALS*7) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]);
return 0;
}
}
return 1;
}
#define CODE() \
ZERO(A); \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target") \
_Pragma("omp teams distribute simd CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
A[i] += C[i]; \
} \
} \
success += check_results(A); \
if (success == expected) \
printf("Succeeded\n");
#define CODE_PRIV() \
ZERO(A); \
ZERO(B); \
p = 2.0; \
q = 4.0; \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target") \
_Pragma("omp teams distribute simd CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
p = 3; \
q = 7; \
A[i] += p; \
B[i] += q; \
} \
} \
success += check_results_priv(A, B); \
if (success == expected) \
printf("Succeeded\n");
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
int fail = 0;
int expected = 1;
int success = 0;
int chunkSize;
double p = 2.0, q = 4.0;
int nte, tl, blockSize;
INIT();
// **************************
// Series 1: no dist_schedule
// **************************
//
// Test: #iterations == #teams
//
printf("iterations = teams\n");
#define CLAUSES num_teams(992)
CODE()
#undef CLAUSES
printf("iterations > teams\n");
#define CLAUSES num_teams(256)
CODE()
#undef CLAUSES
printf("iterations < teams\n");
#define CLAUSES num_teams(1024)
CODE()
#undef CLAUSES
printf("num_teams(512) dist_schedule(static,1)\n");
#define CLAUSES num_teams(512) dist_schedule(static, 1)
CODE()
#undef CLAUSES
printf("num_teams(512) dist_schedule(static,512)\n");
#define CLAUSES num_teams(512) dist_schedule(static, 512)
CODE()
#undef CLAUSES
printf("num_teams(512) dist_schedule(static, chunkSize)\n");
chunkSize = N / 10;
#define CLAUSES num_teams(512) dist_schedule(static, chunkSize)
CODE()
#undef CLAUSES
printf("num_teams(1024) dist_schedule(static, chunkSize)\n");
chunkSize = N / 10;
#define CLAUSES num_teams(1024) dist_schedule(static, chunkSize)
CODE()
#undef CLAUSES
printf("num_teams(1024) dist_schedule(static, 1)\n");
#define CLAUSES num_teams(1024) dist_schedule(static, 1)
CODE()
#undef CLAUSES
printf("num_teams(3) dist_schedule(static, 1)\n");
#define CLAUSES num_teams(3) dist_schedule(static, 1)
CODE()
#undef CLAUSES
printf("num_teams(3) dist_schedule(static, 3)\n");
#define CLAUSES num_teams(3) dist_schedule(static, 3)
CODE()
#undef CLAUSES
printf("num_teams(10) dist_schedule(static, 99)\n");
#define CLAUSES num_teams(10) dist_schedule(static, 99)
CODE()
#undef CLAUSES
printf("num_teams(256) dist_schedule(static, 992)\n");
#define CLAUSES num_teams(256) dist_schedule(static, 992)
CODE()
#undef CLAUSES
#if 0
printf("num_teams(256) private(p,q)\n");
#define CLAUSES num_teams(256) private(p,q)
CODE_PRIV()
#undef CLAUSES
#endif
//
// Test: firstprivate
//
#if 0
printf("num_teams(64) firstprivate(p, q)\n");
ZERO(A); ZERO(B);
p = 2.0, q = 4.0;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation
#pragma omp teams distribute simd num_teams(64) firstprivate(p, q)
for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
q += 7.0;
A[i] += p;
B[i] += q;
}
}
for(int i = 0 ; i < 128 ; i++) {
if (i % 2 == 0) {
if (A[i] != (2.0+3.0)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
fail = 1;
}
} else {
if (A[i] != (2.0+3.0*2)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0*2)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
fail = 1;
}
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
#endif
//
// Test: lastprivate
//
#if 0
printf("num_teams(10) lastprivate(lastpriv)\n");
success = 0;
int lastpriv = -1;
#pragma omp target map(tofrom:lastpriv)
#pragma omp teams distribute simd num_teams(10) lastprivate(lastpriv)
for(int i = 0 ; i < omp_get_num_teams() ; i++)
lastpriv = omp_get_team_num();
if(lastpriv != 9) {
printf("lastpriv value is %d and should have been %d\n", lastpriv, 9);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
#endif
// // ***************************
// // Series 4: with parallel for
// // ***************************
//
// Test: simple blocking loop
//
printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
success = 0;
ZERO(A); ZERO(B);
nte = 32;
tl = 64;
blockSize = tl;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams distribute simd num_teams(nte) thread_limit(tl)
for(int j = 0 ; j < 256 ; j += blockSize) {
for(int i = j ; i < j+blockSize; i++) {
A[i] += B[i] + C[i];
}
}
}
for(int i = 0 ; i < 256 ; i++) {
if (A[i] != TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: blocking loop where upper bound is not a multiple of tl*nte
//
printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
success = 0;
ZERO(A); ZERO(B);
nte = 32;
tl = 64;
blockSize = tl;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target
#pragma omp teams distribute simd num_teams(nte) thread_limit(tl)
for(int j = 0 ; j < 510 ; j += blockSize) {
int ub = (j+blockSize < 510) ? (j+blockSize) : 512;
for(int i = j ; i < ub; i++) {
A[i] += B[i] + C[i];
}
}
}
for(int i = 0 ; i < 256 ; i++) {
if (A[i] != TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// **************************
// Series 5: collapse
// **************************
//
// Test: 2 loops
//
printf("num_teams(512) collapse(2)\n");
success = 0;
double * S = (double *) malloc(N*N*sizeof(double));
double * T = (double *) malloc(N*N*sizeof(double));
double * U = (double *) malloc(N*N*sizeof(double));
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
{
S[i*N+j] = 0.0;
T[i*N+j] = 1.0;
U[i*N+j] = 2.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N])
#pragma omp teams distribute simd num_teams(512) collapse(2)
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
}
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
if (S[i*N+j] != TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: 3 loops
//
printf("num_teams(512) collapse(3)\n");
success = 0;
int M = N/8;
double * V = (double *) malloc(M*M*M*sizeof(double));
double * Z = (double *) malloc(M*M*M*sizeof(double));
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
{
V[i*M*M+j*M+k] = 2.0;
Z[i*M*M+j*M+k] = 3.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M])
#pragma omp teams distribute simd num_teams(512) collapse(3)
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
}
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
return 0;
}
|
omp_masked.c | <ompts:test>
<ompts:testdescription>Test which checks the omp masked directive by counting up a variable in a omp masked section.</ompts:testdescription>
<ompts:ompversion>5.1</ompts:ompversion>
<ompts:directive>omp masked</ompts:directive>
<ompts:dependences>omp critical</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
/*
* TODO not checked up to now: no implied barrier, check threads of team
*/
int <ompts:testcode:functionname>omp_masked</ompts:testcode:functionname>(FILE * logFile)
{
<ompts:orphan:vars>
int nthreads;
int executing_thread;
</ompts:orphan:vars>
nthreads = 0;
executing_thread = -1;
#pragma omp parallel
{
<ompts:orphan>
<ompts:check>#pragma omp masked</ompts:check>
{
#pragma omp critical
{
nthreads++;
}
executing_thread = omp_get_thread_num();
} /* end of master*/
</ompts:orphan>
} /* end of parallel*/
printf("Number of threads in block: %d\n", nthreads);
printf("Executing thread: %d\n", executing_thread);
return ((nthreads == 1) && (executing_thread == 0));
}
</ompts:testcode>
</ompts:test>
|
ast-dump-openmp-critical.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp critical
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-critical.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPCriticalDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-critical.c:4:1) *const restrict'
|
GB_unop__bnot_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__bnot_int8_int8)
// op(A') function: GB (_unop_tran__bnot_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__bnot_int8_int8)
(
int8_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = ~(z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int8_t z = aij ;
Cx [p] = ~(z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__bnot_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__isgt_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isgt_int32
// A.*B function (eWiseMult): GB_AemultB__isgt_int32
// A*D function (colscale): GB_AxD__isgt_int32
// D*A function (rowscale): GB_DxB__isgt_int32
// C+=B function (dense accum): GB_Cdense_accumB__isgt_int32
// C+=b function (dense accum): GB_Cdense_accumb__isgt_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int32
// C=scalar+B GB_bind1st__isgt_int32
// C=scalar+B' GB_bind1st_tran__isgt_int32
// C=A+scalar GB_bind2nd__isgt_int32
// C=A'+scalar GB_bind2nd_tran__isgt_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__isgt_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__isgt_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__isgt_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__isgt_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__isgt_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__isgt_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__isgt_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__isgt_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__isgt_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB_bind1st_tran__isgt_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB_bind2nd_tran__isgt_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
workflow.h | /**
* @file workflow.h
* @brief The main workflow for abess.
* @details It receives all inputs from API, runs the whole abess process
* and then return the results as a list.
*/
#ifndef SRC_WORKFLOW_H
#define SRC_WORKFLOW_H
// #define R_BUILD
#ifdef R_BUILD
#include <Rcpp.h>
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]
using namespace Rcpp;
#else
#include <Eigen/Eigen>
#include "List.h"
#endif
#include <iostream>
#include <vector>
#include "Algorithm.h"
#include "Data.h"
#include "Metric.h"
#include "abessOpenMP.h"
#include "path.h"
#include "screening.h"
#include "utilities.h"
typedef Eigen::Triplet<double> triplet;
using namespace Eigen;
using namespace std;
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::MatrixXd> for Univariate Dense
// <Eigen::VectorXd, Eigen::VectorXd, double, Eigen::SparseMatrix<double> > for Univariate Sparse
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::MatrixXd> for Multivariable Dense
// <Eigen::MatrixXd, Eigen::MatrixXd, Eigen::VectorXd, Eigen::SparseMatrix<double> > for Multivariable Sparse
/**
* @brief The main workflow for abess.
* @tparam T1 for y, XTy, XTone
* @tparam T2 for beta
* @tparam T3 for coef0
* @tparam T4 for X
* @param x sample matrix
* @param y response matrix
* @param n sample size
* @param p number of variables
* @param normalize_type type of normalize
* @param weight weight of each sample
* @param algorithm_type type of algorithm
* @param path_type type of path: 1 for sequencial search and 2 for golden section search
* @param is_warm_start whether enable warm-start
* @param ic_type type of information criterion, used for not CV
* @param Kfold number of folds, used for CV
* @param parameters parameters to be selected, including `support_size`, `lambda`
* @param screening_size size of screening
* @param g_index the first position of each group
* @param early_stop whether enable early-stop
* @param thread number of threads used for parallel computing
* @param sparse_matrix whether sample matrix `x` is sparse matrix
* @param cv_fold_id user-specified cross validation division
* @param A_init initial active set
* @param algorithm_list the algorithm pointer
* @return the result of abess, including the best model parameters
*/
template <class T1, class T2, class T3, class T4>
List abessWorkflow(T4 &x, T1 &y, int n, int p, int normalize_type, Eigen::VectorXd weight, int algorithm_type,
int path_type, bool is_warm_start, int ic_type, double ic_coef, int Kfold, Parameters parameters,
int screening_size, Eigen::VectorXi g_index, bool early_stop, int thread, bool sparse_matrix,
Eigen::VectorXi &cv_fold_id, Eigen::VectorXi &A_init,
vector<Algorithm<T1, T2, T3, T4> *> algorithm_list) {
#ifndef R_BUILD
std::srand(123);
#endif
int algorithm_list_size = algorithm_list.size();
// Size of the candidate set:
// usually it is equal to `p`, the number of variable,
// but it could be different in e.g. RPCA.
int beta_size = algorithm_list[0]->get_beta_size(n, p);
// Data packing & normalize:
// pack & initial all information of data,
// including normalize.
Data<T1, T2, T3, T4> data(x, y, normalize_type, weight, g_index, sparse_matrix, beta_size);
if (algorithm_list[0]->model_type == 1 || algorithm_list[0]->model_type == 5) {
add_weight(data.x, data.y, data.weight);
}
// Screening:
// if there are too many noise variables,
// screening can choose the `screening_size` most important variables
// and then focus on them later.
Eigen::VectorXi screening_A;
if (screening_size >= 0) {
screening_A = screening<T1, T2, T3, T4>(data, algorithm_list, screening_size, beta_size,
parameters.lambda_list(0), A_init);
}
// Prepare for CV:
// if CV is enable,
// specify train and test data,
// and initialize the fitting argument inside each fold.
Metric<T1, T2, T3, T4> *metric = new Metric<T1, T2, T3, T4>(ic_type, ic_coef, Kfold);
if (Kfold > 1) {
metric->set_cv_train_test_mask(data, data.n, cv_fold_id);
metric->set_cv_init_fit_arg(beta_size, data.M);
// metric->set_cv_initial_model_param(Kfold, data.p);
// metric->set_cv_initial_A(Kfold, data.p);
// metric->set_cv_initial_coef0(Kfold, data.p);
// if (model_type == 1)
// metric->cal_cv_group_XTX(data);
}
// Fitting and loss:
// follow the search path,
// fit on each parameter combination,
// and calculate ic/loss.
vector<Result<T2, T3>> result_list(Kfold);
if (path_type == 1) {
// sequentical search
#pragma omp parallel for
for (int i = 0; i < Kfold; i++) {
sequential_path_cv<T1, T2, T3, T4>(data, algorithm_list[i], metric, parameters, early_stop, i, A_init,
result_list[i]);
}
} else {
// if (algorithm_type == 5 || algorithm_type == 3)
// {
// double log_lambda_min = log(max(lambda_min, 1e-5));
// double log_lambda_max = log(max(lambda_max, 1e-5));
// result = pgs_path(data, algorithm, metric, s_min, s_max, log_lambda_min, log_lambda_max, powell_path,
// nlambda);
// }
// golden section search
gs_path<T1, T2, T3, T4>(data, algorithm_list, metric, parameters, A_init, result_list);
}
for (int k = 0; k < Kfold; k++) {
algorithm_list[k]->clear_setting();
}
// Get bestmodel && fit bestmodel:
// choose the best model with lowest ic/loss
// and if CV, refit on full data.
int min_loss_index = 0;
int sequence_size = (parameters.sequence).size();
Eigen::Matrix<T2, Dynamic, 1> beta_matrix(sequence_size, 1);
Eigen::Matrix<T3, Dynamic, 1> coef0_matrix(sequence_size, 1);
Eigen::Matrix<VectorXd, Dynamic, 1> bd_matrix(sequence_size, 1);
Eigen::MatrixXd ic_matrix(sequence_size, 1);
Eigen::MatrixXd test_loss_sum = Eigen::MatrixXd::Zero(sequence_size, 1);
Eigen::MatrixXd train_loss_matrix(sequence_size, 1);
Eigen::MatrixXd effective_number_matrix(sequence_size, 1);
if (Kfold == 1) {
// not CV: choose lowest ic
beta_matrix = result_list[0].beta_matrix;
coef0_matrix = result_list[0].coef0_matrix;
ic_matrix = result_list[0].ic_matrix;
train_loss_matrix = result_list[0].train_loss_matrix;
effective_number_matrix = result_list[0].effective_number_matrix;
ic_matrix.col(0).minCoeff(&min_loss_index);
} else {
// CV: choose lowest test loss
for (int i = 0; i < Kfold; i++) {
test_loss_sum += result_list[i].test_loss_matrix;
}
test_loss_sum /= ((double)Kfold);
test_loss_sum.col(0).minCoeff(&min_loss_index);
Eigen::VectorXi used_algorithm_index = Eigen::VectorXi::Zero(algorithm_list_size);
// refit on full data
#pragma omp parallel for
for (int ind = 0; ind < sequence_size; ind++) {
int support_size = parameters.sequence(ind).support_size;
double lambda = parameters.sequence(ind).lambda;
int algorithm_index = omp_get_thread_num();
used_algorithm_index(algorithm_index) = 1;
T2 beta_init;
T3 coef0_init;
Eigen::VectorXi A_init; // start from a clear A_init (not from the given one)
coef_set_zero(beta_size, data.M, beta_init, coef0_init);
Eigen::VectorXd bd_init = Eigen::VectorXd::Zero(data.g_num);
// warmstart from CV's result
for (int j = 0; j < Kfold; j++) {
beta_init = beta_init + result_list[j].beta_matrix(ind) / Kfold;
coef0_init = coef0_init + result_list[j].coef0_matrix(ind) / Kfold;
bd_init = bd_init + result_list[j].bd_matrix(ind) / Kfold;
}
// fitting
algorithm_list[algorithm_index]->update_sparsity_level(support_size);
algorithm_list[algorithm_index]->update_lambda_level(lambda);
algorithm_list[algorithm_index]->update_beta_init(beta_init);
algorithm_list[algorithm_index]->update_coef0_init(coef0_init);
algorithm_list[algorithm_index]->update_bd_init(bd_init);
algorithm_list[algorithm_index]->update_A_init(A_init, data.g_num);
algorithm_list[algorithm_index]->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p,
data.g_num);
// update results
beta_matrix(ind) = algorithm_list[algorithm_index]->get_beta();
coef0_matrix(ind) = algorithm_list[algorithm_index]->get_coef0();
train_loss_matrix(ind) = algorithm_list[algorithm_index]->get_train_loss();
ic_matrix(ind) = metric->ic(data.n, data.M, data.g_num, algorithm_list[algorithm_index]);
effective_number_matrix(ind) = algorithm_list[algorithm_index]->get_effective_number();
}
for (int i = 0; i < algorithm_list_size; i++) {
if (used_algorithm_index(i) == 1) {
algorithm_list[i]->clear_setting();
}
}
}
// Best result
double best_support_size = parameters.sequence(min_loss_index).support_size;
double best_lambda = parameters.sequence(min_loss_index).lambda;
T2 best_beta;
T3 best_coef0;
double best_train_loss, best_ic, best_test_loss;
best_beta = beta_matrix(min_loss_index);
best_coef0 = coef0_matrix(min_loss_index);
best_train_loss = train_loss_matrix(min_loss_index);
best_ic = ic_matrix(min_loss_index);
best_test_loss = test_loss_sum(min_loss_index);
// Restore for normal:
// restore the changes if normalization is used.
restore_for_normal<T2, T3>(best_beta, best_coef0, beta_matrix, coef0_matrix, sparse_matrix, data.normalize_type,
data.n, data.x_mean, data.y_mean, data.x_norm);
// Store in a list for output
List out_result;
#ifdef R_BUILD
out_result = List::create(
Named("beta") = best_beta, Named("coef0") = best_coef0, Named("train_loss") = best_train_loss,
Named("ic") = best_ic, Named("lambda") = best_lambda, Named("beta_all") = beta_matrix,
Named("coef0_all") = coef0_matrix, Named("train_loss_all") = train_loss_matrix, Named("ic_all") = ic_matrix,
Named("effective_number_all") = effective_number_matrix, Named("test_loss_all") = test_loss_sum);
if (path_type == 2) {
out_result.push_back(parameters.support_size_list, "sequence");
}
#else
out_result.add("beta", best_beta);
out_result.add("coef0", best_coef0);
out_result.add("train_loss", best_train_loss);
out_result.add("test_loss", best_test_loss);
out_result.add("ic", best_ic);
out_result.add("lambda", best_lambda);
// out_result.add("beta_all", beta_matrix);
// out_result.add("coef0_all", coef0_matrix);
// out_result.add("train_loss_all", train_loss_matrix);
// out_result.add("ic_all", ic_matrix);
// out_result.add("test_loss_all", test_loss_sum);
#endif
// Restore for screening
// restore the changes if screening is used.
if (screening_size >= 0) {
T2 beta_screening_A;
T2 beta;
T3 coef0;
beta_size = algorithm_list[0]->get_beta_size(n, p);
coef_set_zero(beta_size, data.M, beta, coef0);
#ifndef R_BUILD
out_result.get_value_by_name("beta", beta_screening_A);
slice_restore(beta_screening_A, screening_A, beta);
out_result.add("beta", beta);
out_result.add("screening_A", screening_A);
#else
beta_screening_A = out_result["beta"];
slice_restore(beta_screening_A, screening_A, beta);
out_result["beta"] = beta;
out_result.push_back(screening_A, "screening_A");
#endif
}
delete metric;
// Return the results
return out_result;
}
#endif // SRC_WORKFLOW_H
|
random.h | /*
* This file is part of Quantum++.
*
* MIT License
*
* Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* \file random.h
* \brief Randomness-related functions
*/
#ifndef RANDOM_H_
#define RANDOM_H_
namespace qpp {
/**
* \brief Generates a random real number uniformly distributed in the interval
* [a, b)
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random real number (double) uniformly distributed in the interval
* [a, b)
*/
inline double rand(double a, double b) {
// EXCEPTION CHECKS
if (a >= b)
throw exception::OutOfRange("qpp::rand()");
// END EXCEPTION CHECKS
std::uniform_real_distribution<> ud(a, b);
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
return ud(gen);
}
/**
* \brief Generates a random big integer uniformly distributed in the interval
* [a, b]
*
* \note To avoid ambiguity with double qpp::rand(double, double) cast at least
* one of the arguments to qpp::bigint
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, belongs to it
* \return Random big integer uniformly distributed in the interval [a, b]
*/
inline bigint rand(bigint a, bigint b) {
// EXCEPTION CHECKS
if (a > b)
throw exception::OutOfRange("qpp::rand()");
// END EXCEPTION CHECKS
std::uniform_int_distribution<bigint> uid(a, b);
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
return uid(gen);
}
/**
* \brief Generates a random index (idx) uniformly distributed in the interval
* [a, b]
*
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, belongs to it
* \return Random index (idx) uniformly distributed in the interval [a, b]
*/
inline idx randidx(idx a = std::numeric_limits<idx>::min(),
idx b = std::numeric_limits<idx>::max()) {
// EXCEPTION CHECKS
if (a > b)
throw exception::OutOfRange("qpp::randidx()");
// END EXCEPTION CHECKS
std::uniform_int_distribution<idx> uid(a, b);
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
return uid(gen);
}
/**
* \brief Generates a random matrix with entries uniformly distributed in the
* interval [a, b)
*
* If complex, then both real and imaginary parts are uniformly distributed in
* [a, b)
*
* This is the generic version that always throws
* qpp::Exception::Type::UNDEFINED_TYPE. It is specialized only for qpp::dmat
* and qpp::cmat
*/
template <typename Derived>
Derived rand(idx rows QPP_UNUSED_, idx cols QPP_UNUSED_,
double a QPP_UNUSED_ = 0, double b QPP_UNUSED_ = 1) {
throw exception::UndefinedType("qpp::rand()");
}
/**
* \brief Generates a random real matrix with entries uniformly distributed in
* the interval [a, b), specialization for double matrices (qpp::dmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXd,
* // with entries uniformly distributed in [-1,1)
* dmat mat = rand<dmat>(3, 3, -1, 1);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random real matrix
*/
template <>
inline dmat rand(idx rows, idx cols, double a, double b) {
// EXCEPTION CHECKS
if (rows == 0 || cols == 0)
throw exception::ZeroSize("qpp::rand()");
if (a >= b)
throw exception::OutOfRange("qpp::rand()");
// END EXCEPTION CHECKS
return dmat::Zero(rows, cols).unaryExpr([a, b](double) {
return rand(a, b);
});
}
/**
* \brief Generates a random complex matrix with entries (both real and
* imaginary) uniformly distributed in the interval [a, b), specialization for
* complex matrices (qpp::cmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXcd,
* // with entries (both real and imaginary) uniformly distributed in [-1,1)
* cmat mat = rand<cmat>(3, 3, -1, 1);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param a Beginning of the interval, belongs to it
* \param b End of the interval, does not belong to it
* \return Random complex matrix
*/
template <>
inline cmat rand(idx rows, idx cols, double a, double b) {
// EXCEPTION CHECKS
if (rows == 0 || cols == 0)
throw exception::ZeroSize("qpp::rand()");
if (a >= b)
throw exception::OutOfRange("qpp::rand()");
// END EXCEPTION CHECKS
return rand<dmat>(rows, cols, a, b).cast<cplx>() +
1_i * rand<dmat>(rows, cols, a, b).cast<cplx>();
}
/**
* \brief Generates a random matrix with entries normally distributed in
* N(mean, sigma)
*
* If complex, then both real and imaginary parts are normally distributed in
* N(mean, sigma)
*
* This is the generic version that always throws
* qpp::Exception::Type::UNDEFINED_TYPE. It is specialized only for qpp::dmat
* and qpp::cmat
*/
template <typename Derived>
Derived randn(idx rows QPP_UNUSED_, idx cols QPP_UNUSED_,
double mean QPP_UNUSED_ = 0, double sigma QPP_UNUSED_ = 1) {
throw exception::UndefinedType("qpp::randn()");
}
/**
* \brief Generates a random real matrix with entries normally distributed in
* N(mean, sigma), specialization for double matrices (qpp::dmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXd,
* // with entries normally distributed in N(0,2)
* dmat mat = randn<dmat>(3, 3, 0, 2);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param mean Mean
* \param sigma Standard deviation
* \return Random real matrix
*/
template <>
inline dmat randn(idx rows, idx cols, double mean, double sigma) {
// EXCEPTION CHECKS
if (rows == 0 || cols == 0)
throw exception::ZeroSize("qpp::randn()");
// END EXCEPTION CHECKS
std::normal_distribution<> nd(mean, sigma);
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
return dmat::Zero(rows, cols).unaryExpr([&nd, &gen](double) {
return nd(gen);
});
}
/**
* \brief Generates a random complex matrix with entries (both real and
* imaginary) normally distributed in N(mean, sigma), specialization for complex
* matrices (qpp::cmat)
*
* The template parameter cannot be automatically deduced and must be explicitly
* provided
*
* Example:
* \code
* // generates a 3 x 3 random Eigen::MatrixXcd,
* // with entries (both real and imaginary) normally distributed in N(0,2)
* cmat mat = randn<cmat>(3, 3, 0, 2);
* \endcode
*
* \param rows Number of rows of the random generated matrix
* \param cols Number of columns of the random generated matrix
* \param mean Mean
* \param sigma Standard deviation
* \return Random complex matrix
*/
template <>
inline cmat randn(idx rows, idx cols, double mean, double sigma) {
// EXCEPTION CHECKS
if (rows == 0 || cols == 0)
throw exception::ZeroSize("qpp::randn()");
// END EXCEPTION CHECKS
return randn<dmat>(rows, cols, mean, sigma).cast<cplx>() +
1_i * randn<dmat>(rows, cols, mean, sigma).cast<cplx>();
}
/**
* \brief Generates a random real number (double) normally distributed in
* N(mean, sigma)
*
* \param mean Mean
* \param sigma Standard deviation
* \return Random real number normally distributed in N(mean, sigma)
*/
inline double randn(double mean = 0, double sigma = 1) {
std::normal_distribution<> nd(mean, sigma);
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
return nd(gen);
}
/**
* \brief Generates a random unitary matrix
*
* \param D Dimension of the Hilbert space
* \return Random unitary
*/
inline cmat randU(idx D = 2)
// ~3 times slower than Toby Cubitt's MATLAB corresponding routine,
// because Eigen 3 QR algorithm is not parallelized
{
// EXCEPTION CHECKS
if (D == 0)
throw exception::DimsInvalid("qpp::randU()");
// END EXCEPTION CHECKS
cmat X = 1 / std::sqrt(2.) * randn<cmat>(D, D);
Eigen::HouseholderQR<cmat> qr(X);
cmat Q = qr.householderQ();
// phase correction so that the resultant matrix is
// uniformly distributed according to the Haar measure
Eigen::VectorXcd phases = (rand<dmat>(D, 1)).cast<cplx>();
for (idx i = 0; i < static_cast<idx>(phases.rows()); ++i)
phases(i) = std::exp(2 * pi * 1_i * phases(i));
Q = Q * phases.asDiagonal();
return Q;
}
/**
* \brief Generates a random isometry matrix
*
* \param Din Size of the input Hilbert space
* \param Dout Size of the output Hilbert space
* \return Random isometry matrix
*/
inline cmat randV(idx Din, idx Dout) {
// EXCEPTION CHECKS
if (Din == 0 || Dout == 0 || Din > Dout)
throw exception::DimsInvalid("qpp::randV()");
// END EXCEPTION CHECKS
return randU(Dout).block(0, 0, Dout, Din);
}
/**
* \brief Generates a set of random Kraus operators
*
* \note The set of Kraus operators satisfy the closure condition
* \f$ \sum_i K_i^\dagger K_i = I\f$
*
* \param N Number of Kraus operators
* \param D Dimension of the Hilbert space
* \return Set of \a N Kraus operators satisfying the closure condition
*/
inline std::vector<cmat> randkraus(idx N, idx D = 2) {
// EXCEPTION CHECKS
if (N == 0)
throw exception::OutOfRange("qpp::randkraus()");
if (D == 0)
throw exception::DimsInvalid("qpp::randkraus()");
// END EXCEPTION CHECKS
std::vector<cmat> result(N);
for (idx i = 0; i < N; ++i)
result[i] = cmat::Zero(D, D);
cmat Fk(D, D);
cmat U = randU(N * D);
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(3)
#endif // WITH_OPENMP_
for (idx k = 0; k < N; ++k)
for (idx a = 0; a < D; ++a)
for (idx b = 0; b < D; ++b)
result[k](a, b) = U(a * N + k, b * N);
return result;
}
/**
* \brief Generates a random Hermitian matrix
*
* \param D Dimension of the Hilbert space
* \return Random Hermitian matrix
*/
inline cmat randH(idx D = 2) {
// EXCEPTION CHECKS
if (D == 0)
throw exception::DimsInvalid("qpp::randH()");
// END EXCEPTION CHECKS
cmat H = 2 * rand<cmat>(D, D) - (1. + 1_i) * cmat::Ones(D, D);
return H + H.adjoint();
}
/**
* \brief Generates a random normalized ket (pure state vector)
*
* \param D Dimension of the Hilbert space
* \return Random normalized ket
*/
inline ket randket(idx D = 2) {
// EXCEPTION CHECKS
if (D == 0)
throw exception::DimsInvalid("qpp::randket()");
// END EXCEPTION CHECKS
/* slow
ket kt = ket::Ones(D);
ket result = static_cast<ket>(randU(D) * kt);
return result;
*/
ket kt = randn<cmat>(D, 1);
return kt / kt.norm();
}
/**
* \brief Generates a random density matrix
*
* \param D Dimension of the Hilbert space
* \return Random density matrix
*/
inline cmat randrho(idx D = 2) {
// EXCEPTION CHECKS
if (D == 0)
throw exception::DimsInvalid("qpp::randrho()");
// END EXCEPTION CHECKS
cmat result = 10 * randH(D);
result = result * result.adjoint();
return result / result.trace();
}
/**
* \brief Generates a random uniformly distributed permutation
*
* Uses Knuth shuffle method (as implemented by std::shuffle), so that all
* permutations are equally probable
*
* \param N Size of the permutation
* \return Random permutation of size \a N
*/
inline std::vector<idx> randperm(idx N) {
// EXCEPTION CHECKS
if (N == 0)
throw exception::PermInvalid("qpp::randperm()");
// END EXCEPTION CHECKS
std::vector<idx> result(N);
// fill in increasing order
std::iota(std::begin(result), std::end(result), 0);
// shuffle
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
std::shuffle(std::begin(result), std::end(result), gen);
return result;
}
/**
* \brief Generates a random probability vector uniformly distributed over the
* probability simplex
*
* \param N Size of the probability vector
* \return Random probability vector
*/
inline std::vector<double> randprob(idx N) {
// EXCEPTION CHECKS
if (N == 0)
throw exception::OutOfRange("qpp::randprob()");
// END EXCEPTION CHECKS
std::vector<double> result(N);
// generate
std::exponential_distribution<> ed(1);
auto& gen =
#ifdef NO_THREAD_LOCAL_
RandomDevices::get_instance().get_prng();
#else
RandomDevices::get_thread_local_instance().get_prng();
#endif
for (idx i = 0; i < N; ++i)
result[i] = ed(gen);
// normalize
double sumprob = std::accumulate(std::begin(result), std::end(result), 0.0);
for (idx i = 0; i < N; ++i)
result[i] /= sumprob;
return result;
}
} /* namespace qpp */
#endif /* RANDOM_H_ */
|
GB_unop__identity_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_uint32)
// op(A') function: GB (_unop_tran__identity_uint64_uint32)
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_uint32)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint64_t z = (uint64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB060-matrixmultiply-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Classic i-k-j matrix multiplication
*/
#define N 100
#define M 100
#define K 100
double a[N][M],b[M][K],c[N][K];
int mmm()
{
int i,j,k;
#pragma omp parallel for private(j,k)
for (i = 0; i < N; i++)
for (k = 0; k < K; k++)
for (j = 0; j < M; j++)
c[i][j]= c[i][j]+a[i][k]*b[k][j];
return 0;
}
int main()
{
mmm();
return 0;
}
|
data_provider.h | #ifndef DATAPROVIDER_H__
#define DATAPROVIDER_H__
#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/containers/vector.hpp>
#include <boost/interprocess/managed_shared_memory.hpp>
#include <boost/interprocess/sync/named_mutex.hpp>
#include <opencv2/opencv.hpp>
#include <chrono>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include "net_config.h"
#include "misc.h"
namespace caffe2 {
using namespace boost::interprocess;
template<typename T>
class DataProvider {
public:
DataProvider(const int batch_size, const string& data_order,
const bool dummy_data, const int iterations, const string& net_conf,
const string& shared_memory_option, const string& numa_id, const bool use_index);
DataProvider(const vector<string>& imgNames, const vector<int>& labels,
const int batch_size, const string& data_order,
const bool dummy_data, const int iterations, const string& net_conf,
const string& shared_memory_option, const string& numa_id, const bool use_index);
DataProvider(const string& file_list, const string& image_path, const string& label_path,
const int batch_size, const string& data_order,
const bool dummy_data, const int iterations, const string& net_conf,
const string& shared_memory_option, const string& numa_id, const bool use_index);
~DataProvider();
T* get_data() { return inputData_; }
const vector<int>& get_labels() { return labels_; }
const int get_iterations() { return iterations_; }
void clean_shared_memory(const string& numa_id);
void load_sample(size_t* samples, size_t sample_size, bool dummy_data, const string& file_list,
const string& image_path, const string& label_path);
void load_sample(size_t* sample, size_t sample_size);
private:
std::unique_ptr<NetConf> net_conf_;
T* inputData_;
managed_shared_memory managed_shm_;
int batchSize_ = 1;
int iterations_ = 1;
int sample_offset_ = 0;
unsigned long long inputSize_ = 0;
string dataOrder_ = "NCHW";
string sharedMemory_;
string numaId_;
vector<int> input_shape_;
vector<string> imgNames_;
vector<int> labels_;
vector<T> inputImgs_;
// use for mlperf random index load sample
const size_t IMAGENET_IMAGE_SIZE = 50000;
bool useIndex_ = false;
vector<T> loadBuffer_;
void ParseImageLabel(const string& file_list, const string& image_path,
const string& label_path, const size_t sample_size,
const bool dummy_data);
void ParseImageLabel(const string& file_list);
// methods for preprocessing
void SetMeanScale();
void CenterCrop(cv::Mat* sample_resized, cv::Mat* sample_roi);
void ResizeWithAspect(cv::Mat* sample, cv::Mat* sample_resized);
void ResizeWithRescale(cv::Mat* sample, cv::Mat* sample_resized);
void PreprocessSingleIteration(T* inputImgs,
const vector<string>& imgNames);
void PreprocessUsingCVMethod(T* inputImgs,
const vector<string>& imgNames);
void Preprocess(const bool dummy, T* inputImgs,
const vector<string>& imgNames);
// methods for memory used
void WrapInput(const bool dummy_data);
void WrapSHMInput(const bool dummy_data);
void CreateUseSharedMemory(const bool dummy_data);
void DirectUseSharedMemory(const bool dummy_data);
void WrapLocalInput(const bool dummy_data);
void CleanSharedMemory(const string& numa_id);
};
template<typename T>
DataProvider<T>::DataProvider(const vector<string>& imgNames,const vector<int>& labels,
const int batch_size, const string& data_order,
const bool dummy_data, const int iterations, const string& net_conf,
const string& shared_memory_option, const string& numa_id, const bool use_index)
: inputData_(nullptr),
batchSize_(batch_size),
iterations_(iterations),
dataOrder_(data_order),
sharedMemory_(shared_memory_option),
numaId_(numa_id),
imgNames_(imgNames),
labels_(labels),
useIndex_(use_index) {
net_conf_ = get_net_conf(net_conf);
WrapInput(dummy_data);
}
template<typename T>
void DataProvider<T>::load_sample(size_t* samples, size_t sample_size, bool dummy_data,
const string& file_list, const string& image_path,
const string& label_path) {
size_t imagenet_size = 50000;
ParseImageLabel(file_list, image_path, label_path, imagenet_size, dummy_data);
vector<string> sample_names;
vector<int> sample_labels;
auto use_size = sample_size % batchSize_ ? (sample_size / batchSize_ + 1 ) * batchSize_ : sample_size;
auto flexible_size = sample_size < imagenet_size ? sample_size : use_size;
for (size_t i = 0; i < flexible_size; ++i) {
sample_names.push_back(imgNames_[samples[i % sample_size]]);
sample_labels.push_back(labels_[samples[i % sample_size]]);
}
if (sample_size != batchSize_ * iterations_) {
LOG(ERROR) << "batchsize * iteration is not equal to sampled images";
iterations_ = sample_size / batchSize_;
sample_names.resize(batchSize_ * iterations_);
sample_labels.resize(batchSize_ * iterations_);
}
imgNames_ = sample_names;
labels_ = sample_labels;
WrapInput(dummy_data);
}
template<typename T>
void DataProvider<T>::load_sample(size_t* samples, size_t sample_size) {
if (useIndex_) {
vector<int> sample_labels(batchSize_ * iterations_, 0);
if (sharedMemory_ == "CREATE_USE_SHM" || sharedMemory_ == "USE_LOCAL") {
loadBuffer_.resize(iterations_ * batchSize_ * inputSize_, 0);
if (sample_size != batchSize_ * iterations_)
LOG(FATAL) << "sample size is not equal to batchsize * iterations";
#pragma omp parallel for
for (size_t i = 0; i < sample_size; ++i) {
std::memcpy(loadBuffer_.data() + i * inputSize_,
inputData_ + samples[i] * inputSize_,
inputSize_ * sizeof(T));
sample_labels[i] = labels_[samples[i]];
}
std::memcpy(inputData_, loadBuffer_.data(), batchSize_ * iterations_ * inputSize_ * sizeof(T));
labels_ = sample_labels;
if (sharedMemory_ == "CREATE_USE_SHM") {
*(managed_shm_.find<bool>(("SharedMemorySwap" + numaId_).c_str()).first) = true;
}
} else {
int temp_status = 0;
// check whether images has been preprocessed
#pragma omp parallel for
for (size_t i = 0; i < sample_size; ++i) {
sample_labels[i] = labels_[samples[i]];
}
labels_ = sample_labels;
while (!(*(managed_shm_.find<bool>(("SharedMemorySwap" + numaId_).c_str()).first))) {
if (temp_status == 0) {
LOG(INFO) << "image swapping not ready, wait image memory swapping completed";
temp_status++;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
}
}
}
template<typename T>
DataProvider<T>::DataProvider(const int batch_size, const string& data_order,
const bool dummy_data, const int iterations, const string& net_conf,
const string& shared_memory_option, const string& numa_id, const bool use_index)
: inputData_(nullptr),
batchSize_(batch_size),
iterations_(iterations),
dataOrder_(data_order),
sharedMemory_(shared_memory_option),
numaId_(numa_id),
useIndex_(use_index) {
net_conf_ = get_net_conf(net_conf);
}
template<typename T>
DataProvider<T>::DataProvider(const string& file_list, const string& image_path,
const string& label_path, const int batch_size, const string& data_order,
const bool dummy_data, const int iterations, const string& net_conf,
const string& shared_memory_option, const string& numa_id, const bool use_index)
: inputData_(nullptr),
batchSize_(batch_size),
iterations_(iterations),
dataOrder_(data_order),
sharedMemory_(shared_memory_option),
numaId_(numa_id),
useIndex_(use_index) {
net_conf_ = get_net_conf(net_conf);
size_t parse_size = batchSize_ * iterations_ ;
if ((batchSize_ * iterations_ < IMAGENET_IMAGE_SIZE) && useIndex_)
parse_size = IMAGENET_IMAGE_SIZE;
ParseImageLabel(file_list, image_path, label_path, parse_size, dummy_data);
WrapInput(dummy_data);
}
template<typename T>
DataProvider<T>::~DataProvider() {
}
template<typename T>
void DataProvider<T>::clean_shared_memory(const string& numa_id) {
CleanSharedMemory(numa_id);
}
template<typename T>
void DataProvider<T>::CleanSharedMemory(const string& numa_id) {
shared_memory_object::remove(("SharedMemory" + numa_id).c_str());
}
template<typename T>
void DataProvider<T>::CreateUseSharedMemory(const bool dummy_data) {
const size_t TOTAL_IMAGE_SIZE = 50001 * 8;
managed_shm_ = managed_shared_memory(open_or_create,
("SharedMemory" + numaId_).c_str() , TOTAL_IMAGE_SIZE * inputSize_);
// check whether shared memory has prepared target image data, if not, prepare target data.
auto shared_image_size = managed_shm_.find_or_construct<int>(("SharedImageSize" + numaId_).c_str())(0);
managed_shm_.find_or_construct<bool>(("SharedMemorySwap" + numaId_).c_str())(false);
const allocator<T, managed_shared_memory::segment_manager>
alloc_inst(managed_shm_.get_segment_manager());
auto shared_input_images =
managed_shm_.find_or_construct<vector<T,
allocator<T,managed_shared_memory::segment_manager>>>(("SharedInputImgs" + numaId_).c_str())(alloc_inst);
// do preprocess only when shared memory don't has enough images buffered.
if (*shared_image_size != iterations_ * batchSize_) {
size_t parse_size = batchSize_ * iterations_ ;
if ((batchSize_ * iterations_ < IMAGENET_IMAGE_SIZE) && useIndex_)
parse_size = IMAGENET_IMAGE_SIZE;
shared_input_images->resize(parse_size * inputSize_);
inputData_ = shared_input_images->data();
Preprocess(dummy_data, inputData_, imgNames_);
*shared_image_size = iterations_ * batchSize_;
} else {
inputData_ = shared_input_images->data();
}
}
template<typename T>
void DataProvider<T>::DirectUseSharedMemory(const bool dummy_data) {
int temp_status = 0;
while (temp_status == 0) {
try {
managed_shm_ = managed_shared_memory(open_only, ("SharedMemory" + numaId_).c_str());
temp_status = 1;
}
catch(boost::interprocess::interprocess_exception) {
LOG(INFO) << "check whether shared memory created, use CREATE_USE_SHM in command line";
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
}
temp_status = 0;
// check whether images has been preprocessed
while (*(managed_shm_.find<int>(("SharedImageSize" + numaId_).c_str()).first)
!= batchSize_ * iterations_) {
if (temp_status == 0) {
LOG(INFO) << "shared image size not satisfied, wait preprocess completed";
temp_status++;
}
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
auto shared_input_images = managed_shm_.find<vector<T, allocator<T,
managed_shared_memory::segment_manager>>>(("SharedInputImgs" + numaId_).c_str());
inputData_ = shared_input_images.first->data();
}
template<typename T>
void DataProvider<T>::WrapSHMInput(const bool dummy_data) {
LOG(INFO) << "use shared memory: " << sharedMemory_;
if (sharedMemory_ == "CREATE_USE_SHM") {
CleanSharedMemory(numaId_);
CreateUseSharedMemory(dummy_data);
} else {
DirectUseSharedMemory(dummy_data);
}
}
template<typename T>
void DataProvider<T>::WrapLocalInput(const bool dummy_data) {
LOG(INFO) << "use local memory";
if (dummy_data) inputImgs_.resize(batchSize_ * inputSize_, 0);
else inputImgs_.resize(iterations_ * batchSize_ * inputSize_, 0);
inputData_ = inputImgs_.data();
Preprocess(dummy_data, inputData_, imgNames_);
}
template<typename T>
void DataProvider<T>::WrapInput(const bool dummy_data) {
inputSize_ = net_conf_->channels * net_conf_->height * net_conf_->width;
if (sharedMemory_ == "USE_LOCAL") {
WrapLocalInput(dummy_data);
} else {
WrapSHMInput(dummy_data);
}
}
template<typename T>
void DataProvider<T>::SetMeanScale() {
}
template<typename T>
void DataProvider<T>::ResizeWithAspect(cv::Mat* sample, cv::Mat* sample_resized) {
auto scale = net_conf_->aspect_scale;
auto new_height = static_cast<int>(100. * net_conf_->height / scale);
auto new_width = static_cast<int>(100. * net_conf_->width / scale);
auto inter_pol = net_conf_->net_name == "resnet50" ? cv::INTER_AREA: cv::INTER_LINEAR;
if ((*sample).rows > (*sample).cols) {
auto res = static_cast<int>((*sample).rows * new_width / (*sample).cols);
cv::resize((*sample), (*sample_resized), cv::Size(new_width, res), (0, 0), (0, 0), inter_pol);
} else {
auto res = static_cast<int>((*sample).cols * new_height / (*sample).rows);
cv::resize((*sample), (*sample_resized), cv::Size(res, new_height), (0, 0), (0, 0), inter_pol);
}
}
// resize image using rescale
template<typename T>
void DataProvider<T>::ResizeWithRescale(cv::Mat* sample, cv::Mat* sample_rescale) {
auto aspect = static_cast<float>((*sample).cols) / (*sample).rows;
if (aspect > 1) {
auto res = static_cast<int>(net_conf_->rescale_size * aspect);
cv::resize((*sample), (*sample_rescale), cv::Size(res, net_conf_->rescale_size));
} else {
auto res = static_cast<int>(net_conf_->rescale_size / aspect);
cv::resize((*sample), (*sample_rescale), cv::Size(net_conf_->rescale_size, res));
}
}
template<typename T>
void DataProvider<T>::CenterCrop(cv::Mat* sample_resized, cv::Mat* sample_roi) {
int x = (*sample_resized).cols;
int y = (*sample_resized).rows;
int startx = static_cast<int>(std::floor(x * 0.5 - net_conf_->width * 0.5));
int starty = static_cast<int>(std::floor(y * 0.5 - net_conf_->height * 0.5));
cv::Rect roi(startx, starty, net_conf_->width, net_conf_->height);
// roi image
(*sample_roi) = (*sample_resized)(roi);
}
template<typename T>
void DataProvider<T>::PreprocessUsingCVMethod(T* inputImgs,
const vector<string>& imgNames) {
// wrap and process image files.
cv::Mat mean(net_conf_->width, net_conf_->height, CV_32FC3,
cv::Scalar(net_conf_->mean_value[0], net_conf_->mean_value[1], net_conf_->mean_value[2]));
cv::Mat scale(net_conf_->width, net_conf_->height, CV_32FC3,
cv::Scalar(net_conf_->scale, net_conf_->scale, net_conf_->scale));
bool quantized_ = false;
if (sizeof(T) == sizeof(char)) quantized_ = true;
int converted_type;
if (quantized_) {
if (dataOrder_ == "NCHW") converted_type = CV_8SC1;
else if (dataOrder_ == "NHWC") converted_type = CV_8SC3;
} else {
if (dataOrder_ == "NCHW") converted_type = CV_32FC1;
else if (dataOrder_ == "NHWC") converted_type = CV_32FC3;
}
#pragma omp parallel for
for (size_t i = 0; i < imgNames.size(); ++i) {
auto input_data = inputImgs + i * inputSize_;
cv::Mat img = cv::imread(imgNames[i]);
// convert the input image to the input image format of the network.
cv::Mat sample;
if (img.channels() == 3 && net_conf_->channels == 1)
cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
else if (img.channels() == 4 && net_conf_->channels == 1)
cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
else if (img.channels() == 4 && net_conf_->channels == 3)
cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
else if (img.channels() == 1 && net_conf_->channels == 3)
cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
else
sample = img;
cv::Mat sample_resized;
cv::Mat sample_roi;
if (net_conf_->preprocess_method == "ResizeWithAspect")
ResizeWithAspect(&sample, &sample_resized);
else
ResizeWithRescale(&sample, &sample_resized);
CenterCrop(&sample_resized, &sample_roi);
cv::Mat sample_float;
if (net_conf_->channels == 3) {
sample_roi.convertTo(sample_float, CV_32FC3);
} else sample_roi.convertTo(sample_float, CV_32FC1);
cv::Mat sample_subtract, sample_normalized;
if (net_conf_->net_name=="resnet50") {
cv::subtract(sample_float, mean, sample_subtract);
cv::multiply(sample_subtract, scale, sample_normalized);
} else if (net_conf_->net_name=="mobilenetv1") {
cv::subtract(sample_float, mean, sample_subtract);
cv::divide(sample_subtract, mean, sample_normalized);
}
vector<cv::Mat> input_channels;
if (net_conf_->bgr2rgb) cv::cvtColor(sample_normalized, sample_normalized, cv::COLOR_RGB2BGR);
if (quantized_) sample_normalized.convertTo(sample_normalized, converted_type, 1.0 / net_conf_->input_scale);
if (dataOrder_ == "NCHW") {
for (auto j = 0; j < net_conf_->channels; ++j) {
cv::Mat channel(net_conf_->height, net_conf_->width, converted_type, input_data);
input_channels.push_back(channel);
input_data += net_conf_->width * net_conf_->height;
}
/* This operation will write the separate BGR planes directly to the
* input layer of the network because it is wrapped by the cv::Mat
* objects in input_channels. */
cv::split(sample_normalized, input_channels);
} else if (dataOrder_ == "NHWC") {
cv::Mat channel(net_conf_->height, net_conf_->width, converted_type, input_data);
sample_normalized.copyTo(channel);
}
// add zero_point 128 to u8 format
auto u8_input_opt_option = getenv("U8_INPUT_OPT");
if (quantized_ && (u8_input_opt_option != NULL) && (atoi(u8_input_opt_option) != 0))
for(size_t i = 0; i < inputSize_; ++i) input_data[i] += 128;
}
}
// preprocess the img according to batch_size * iterations
template<typename T>
void DataProvider<T>::Preprocess(const bool dummy, T* inputImgs,
const vector<string>& imgNames) {
if (!dummy) {
LOG(INFO) << "this process will preprocess images";
PreprocessUsingCVMethod(inputImgs, imgNames);
} else {
// only use one batch dummy
for (int i = 0; i < batchSize_ * inputSize_; ++i)
inputImgs[i] = static_cast<T>(std::rand()) / RAND_MAX;
}
}
// preprocess the img according to batch size
template<typename T>
void DataProvider<T>::PreprocessSingleIteration(T* inputImgs,
const vector<string>& imgNames) {
// wrap and process image files.
int img_size = net_conf_->channels * net_conf_->height * net_conf_->width;
cv::Mat float_img;
for (size_t i = 0; i < imgNames.size(); ++i) {
cv::Mat raw_img = cv::imread(imgNames[i]);
cv::Mat resized_img;
cv::resize(raw_img,
resized_img,
cv::Size(net_conf_->width, net_conf_->height),
0,
0,
cv::INTER_LINEAR);
resized_img.convertTo(float_img, CV_32FC3);
int tran_c = 0;
int index = 0;
for (int h = 0; h < net_conf_->height; ++h) {
for (int w = 0; w < net_conf_->width; ++w) {
for ( int c = 0; c < net_conf_->channels; ++c) {
tran_c = net_conf_->bgr2rgb ? (2-c) : c;
if (dataOrder_ == "NHWC") {
index = img_size * i + h * net_conf_->width * net_conf_->channels + w * net_conf_->channels + c;
} else if (dataOrder_ == "NCHW") {
index = img_size * i + c * net_conf_->width * net_conf_->height + h * net_conf_->width + w;
}
inputImgs[index] =
static_cast<T>((float_img.ptr<cv::Vec3f>(h)[w][tran_c] -
net_conf_->mean_value[c]) * net_conf_->scale);
}
}
}
}
}
template<typename T>
void DataProvider<T>::ParseImageLabel(const string& file_list) {
// wrap and process image files.
std::ifstream image_file(file_list);
string val;
while (getline(image_file, val)) {
auto pos = val.find(" ");
auto label = std::atoi(val.substr(pos+1).c_str());
labels_.push_back(label);
imgNames_.push_back(val.substr(0, pos));
if (imgNames_.size() == batchSize_ * iterations_) break;
}
image_file.close();
if (imgNames_.size() < batchSize_ * iterations_) {
LOG(ERROR) << "check val.txt to prepare proper quantity of images!";
LOG(FATAL) << "batch * iterations_ size is too large!";
}
}
template<typename T>
void DataProvider<T>::ParseImageLabel(const string& file_list, const string& image_path,
const string& label_path, const size_t sample_size,
const bool dummy_data) {
if (dummy_data) {
LOG(INFO) << "dummy data will not parse the image";
labels_.resize(sample_size);
return;
}
string val;
// wrap and process image files.
if (!file_list.empty() || (!label_path.empty() && !image_path.empty())) {
string file_name;
if (!file_list.empty()) file_name = file_list;
else file_name = label_path;
std::ifstream image_file(file_name);
while (getline(image_file, val)) {
auto pos = val.find(" ");
auto label = std::atoi(val.substr(pos + 1).c_str());
labels_.push_back(label);
if (!file_list.empty()) {
imgNames_.push_back(val.substr(0, pos));
} else {
string image_val = val.substr(0, pos);
auto image_pos = (image_val.find_last_of("/"));
imgNames_.push_back(image_path + image_val.substr(image_pos + 1));
}
if (imgNames_.size() == sample_size) break;
}
image_file.close();
} else if (image_path == "") {
LOG(FATAL) << "image path should be given!";
} else {
if (label_path == "") {
LOG(WARNING) << "label path not given, accuracy not caculated!";
DIR* image_dir = opendir(image_path.c_str());
if (image_dir == nullptr) LOG(FATAL) << "can't read image path!";
struct dirent* file_name;
int filter_dir = 0;
while((file_name = readdir(image_dir)) != nullptr) {
// linux dir will read dir "." and ".." as file_name, so filter that
if ((filter_dir++) < 2) continue;
imgNames_.push_back(image_path + file_name->d_name);
if (imgNames_.size() == sample_size) break;
}
}
}
if (imgNames_.size() < sample_size) {
LOG(ERROR) << "image size is " << imgNames_.size() << " sample size is " << sample_size;
size_t append_count = sample_size - imgNames_.size();
size_t real_image_size = imgNames_.size();
for (size_t i = 0; i < append_count; ++i) {
imgNames_.push_back(imgNames_[i % real_image_size]);
labels_.push_back(labels_[i % real_image_size]);
}
return;
}
}
} // using namespace caffe2
#endif // DATAPROVIDER_H__
|
id2pos.c | /*
gcc -fopenmp -lm -lgsl -lgslcblas -lgad -L ./ mk_id_list.c -o ~/bin/mk_id_list OctTree.o
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include "libgad.h"
#include "ompfuncs.h"
#define USE 63
// #ifdef LONGIDS
// typedef unsigned long long IDtype;
// #else
// typedef unsigned int IDtype;
// #endif
int cmp_IDtype (const void *first, const void *second)
{
IDtype *a = (IDtype *)first;
IDtype *b = (IDtype *)second;
if (*a > *b) return 1;
else if (*a < *b) return -1;
else return 0;
}
const int MAX_HALO_ID = 100000;
const float EXTEND = 500;
const float TRACE_FACTOR = 2.;
const float SEARCHDIST = 25;
const float MAXDIST = 3000.;
const float SOFTENING = 1.0;
void usage()
{
fprintf(stderr," search positions of ID list - reads a list of IDs and creates position file of corresponding particles\n");
fprintf(stderr,"\t-o \t<ID list base file name>\n");
fprintf(stderr,"\t-i \t<snaphsot file name>\n");
fprintf(stderr,"\t-max\t<max Halo ID>\n");
fprintf(stderr,"\t-use\t<bitcode particle types to use (default 2ΒΉ)>\n\n");
exit(1);
}
int main (int argc, char *argv[])
{
FILE *fp;
char infile[256];
char outbase[256];
char catname[256];
char **output;
int i,j,k, usepart;
struct gadpart *part, *wpart;
struct header head;
int max_halo_id = MAX_HALO_ID;
float extend = EXTEND;
float trace_factor = TRACE_FACTOR;
int verbose = 0;
float searchdist = SEARCHDIST;
float def_maxdist = MAXDIST;
double conv_dist = 1.;
int start_id = 0;
int num_halos = 0;
int write_catalogue = 0;
int write_gad_file = 0;
int outpos = 1;
double soft = SOFTENING;
strcpy(outbase,"idlist");
i=1;
usepart=USE;
if (1==argc) usage();
while (i<argc)
{
if (!strcmp(argv[i],"-i"))
{
i++;
strcpy(infile,argv[i]);
i++;
}
else if (*argv[i]!='-')
{
strcpy(infile,argv[i]);
i++;
}
else if (!strcmp(argv[i],"-o"))
{
i++;
strcpy(outbase,argv[i]);
i++;
}
else if (!strcmp(argv[i],"-c"))
{
i++;
strcpy(catname,argv[i]);
write_catalogue = 1;
i++;
}
else if (!strcmp(argv[i],"-gad"))
{
i++;
write_gad_file = 1;
}
else if (!strcmp(argv[i],"-pos"))
{
i++;
outpos = 1;
}
else if (!strcmp(argv[i],"-v"))
{
i++;
verbose = 1;
}
else if (!strcmp(argv[i],"-s"))
{
i++;
start_id = atoi(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-max"))
{
i++;
max_halo_id = atoi(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-e"))
{
i++;
extend = atof(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-sfl"))
{
i++;
soft = atof(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-md"))
{
i++;
def_maxdist = atof(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-cd"))
{
i++;
conv_dist = atof(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-tf"))
{
i++;
trace_factor = atof(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-sd"))
{
i++;
searchdist = atof(argv[i]);
i++;
}
else if (!strcmp(argv[i],"-use")) {
i++;
if (!strcmp(argv[i],"all")) usepart=63;
else usepart=atoi(argv[i]);
i++;
} else {
usage();
}
}
if (verbose)
{
printf("reading snapshot\n");
fflush(stdout);
}
unsigned int numpart_all;
if (!(numpart_all=readgadget_part(infile, &head, &part)))
{
extern int libgaderr;
printf("error reading file %s\nError Code %d\n",infile, libgaderr);
exit(1);
}
if (verbose)
{
printf("sorting snapshot\n");
fflush(stdout);
}
myqsort(part, numpart_all, sizeof(gadpart), cmp_id);
/*********************************************************************
Program code goes here
*********************************************************************/
if (verbose)
{
printf("main loop...\n");
fflush(stdout);
}
int haloid;
#pragma omp parallel for private (i,j,k) reduction (+ : num_halos)
for ( haloid = start_id; haloid <= max_halo_id; haloid++ )
{
char idlistname[128];
sprintf(idlistname, "%s_%d", outbase, haloid);
char posfilename[128];
sprintf(posfilename, "%s_positions_%d", outbase, haloid);
FILE *fp = fopen(idlistname, "rb");
if (fp == NULL)
{
continue;
}
num_halos++;
int numids=0;
fltarr center;
IDtype *idlist;
fltarr *pos = NULL;
float maxdist = 0;
fread(&numids, sizeof(int), 1, fp);
fread(center, sizeof(float), 3, fp);
if (verbose)
{
printf("haloid %d | numids %d | center %g %g %g\n", haloid, numids, center[0], center[1], center[2]);
fflush(stdout);
}
if (numids)
{
fread(&maxdist, sizeof(float), 1, fp);
idlist = calloc(numids, sizeof(IDtype));
fread(&idlist[0], sizeof(IDtype), numids, fp);
qsort(idlist, numids, sizeof(IDtype), cmp_IDtype);
pos = (fltarr*) calloc(numids, sizeof(fltarr));
}
fclose(fp);
if (verbose)
{
printf("haloid %d | center %g %g %g\n", haloid, center[0], center[1], center[2]);
fflush(stdout);
}
if (numids)
{
int numfnd = 0;
gadpart *start = part;
for ( i = 0; i < numids; i++ )
{
gadpart *fnd;
gadpart idpart;
idpart.id = idlist[i];
long int size = &part[numpart_all] - start;
fnd = bsearch( &idpart, start, size, sizeof(gadpart), cmp_id);
if (fnd != NULL)
{
start = fnd;
for ( j = 0; j < 3; j++)
pos[numfnd][j] = fnd->pos[j] / head.boxsize;
numfnd++;
}
// if (numfnd >= numids) break;
}
if (verbose)
{
printf("haloid %d | numfnd %d\n", haloid, numfnd);
if (numfnd != numids)
{
fprintf(stderr, "particle not found | halo %d\n", haloid);
exit(1);
}
fflush(stdout);
}
}
int totnumids = numids;
fp = fopen(posfilename, "w");
fwrite(&totnumids, sizeof(int), 1, fp);
if (numids)
fwrite(&pos[0], sizeof(fltarr), numids, fp);
fclose(fp);
if (numids)
{
if (outpos)
free(pos);
}
}
return 0;
}
|
GB_unaryop__ainv_fp64_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp64_int8
// op(A') function: GB_tran__ainv_fp64_int8
// C type: double
// A type: int8_t
// cast: double cij = (double) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_fp64_int8
(
double *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_fp64_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pca_kmeans.c | #include "pca_kmeans.h"
#include "kmeans_utils.h"
#include "../../utils/matrix/csr_matrix/csr_to_vector_list.h"
#include "../../utils/matrix/vector_list/vector_list_math.h"
#include "../../utils/matrix/csr_matrix/csr_math.h"
#include "../../utils/vector/common/common_vector_math.h"
#include "../../utils/vector/sparse/sparse_vector_math.h"
#include "../../utils/fcl_logging.h"
#include <math.h>
#include <unistd.h>
#include <float.h>
struct kmeans_result* pca_kmeans(struct csr_matrix* samples, struct kmeans_params *prms) {
uint32_t i;
uint64_t j;
struct sparse_vector* pca_projection_samples; /* projection matrix of samples */
struct sparse_vector* pca_projection_clusters; /* projection matrix of clusters */
struct kmeans_result* res;
uint32_t disable_optimizations;
VALUE_TYPE* vector_lengths_pca_samples;
VALUE_TYPE* vector_lengths_pca_clusters;
/* pca_kmeans: contains all samples which are eligible for the cluster
* no change optimization.
*/
uint32_t *eligible_for_cluster_no_change_optimization;
struct general_kmeans_context ctx;
pca_projection_clusters = NULL;
pca_projection_samples = NULL;
initialize_general_context(prms, &ctx, samples);
disable_optimizations = prms->ext_vects == NULL;
if (!disable_optimizations) {
if (prms->kmeans_algorithm_id == ALGORITHM_PCA_KMEANS) {
/* create pca projections for the samples */
pca_projection_samples = matrix_dot(samples, prms->ext_vects);
calculate_vector_list_lengths(pca_projection_samples, samples->sample_count, &vector_lengths_pca_samples);
}
/* create pca projections for the clusters */
pca_projection_clusters = sparse_vectors_matrix_dot(ctx.cluster_vectors,
ctx.no_clusters,
prms->ext_vects);
vector_lengths_pca_clusters = NULL;
}
eligible_for_cluster_no_change_optimization = (uint32_t*) calloc(ctx.samples->sample_count, sizeof(uint32_t));
for (i = 0; i < prms->iteration_limit && !ctx.converged && !prms->stop; i++) {
/* track how many projection calculations were made / saved */
uint64_t saved_calculations_pca, saved_calculations_prev_cluster;
uint64_t done_pca_calcs, saved_calculations_cauchy;
/* reset all calculation counters */
done_pca_calcs = 0;
saved_calculations_cauchy = 0;
saved_calculations_prev_cluster = 0;
saved_calculations_pca = 0;
/* initialize data needed for the iteration */
pre_process_iteration(&ctx);
free(vector_lengths_pca_clusters);
calculate_vector_list_lengths(pca_projection_clusters, ctx.no_clusters, &vector_lengths_pca_clusters);
#pragma omp parallel for schedule(dynamic, 1000)
for (j = 0; j < ctx.samples->sample_count; j++) {
/* iterate over all samples */
VALUE_TYPE dist;
uint64_t cluster_id, sample_id;
struct sparse_vector pca_projection;
pca_projection.nnz = 0;
pca_projection.keys = NULL;
pca_projection.values = NULL;
if (omp_get_thread_num() == 0) check_signals(&(prms->stop));
if (!prms->stop) {
sample_id = j;
for (cluster_id = 0; cluster_id < ctx.no_clusters; cluster_id++) {
/* iterate over all cluster centers */
/* if we are not in the first iteration and this cluster is empty, continue to next cluster */
if (i != 0 && ctx.cluster_counts[cluster_id] == 0) continue;
if (!disable_optimizations) {
/* pca_kmeans */
/* we already know the distance to the cluster from last iteration */
if (cluster_id == ctx.previous_cluster_assignments[sample_id]) continue;
/* clusters which did not move in the last iteration can be skipped if the sample is eligible */
if (eligible_for_cluster_no_change_optimization[sample_id] && ctx.clusters_not_changed[cluster_id]) {
/* cluster did not move and sample was eligible for this check. distance to this cluster can not be less than to our best from last iteration */
saved_calculations_prev_cluster += 1;
goto end;
}
/* evaluate cauchy approximation. fast but not good */
dist = lower_bound_euclid(ctx.vector_lengths_clusters[cluster_id]
, ctx.vector_lengths_samples[sample_id]);
if (dist >= ctx.cluster_distances[sample_id]) {
/* approximated distance is larger than current best distance. skip full distance calculation */
saved_calculations_cauchy += 1;
goto end;
}
if (prms->kmeans_algorithm_id == ALGORITHM_PCA_KMEANS) {
/* evaluate pca approximation. using precalculated feature map*/
dist = euclid_vector(pca_projection_samples[sample_id].keys
, pca_projection_samples[sample_id].values
, pca_projection_samples[sample_id].nnz
, pca_projection_clusters[cluster_id].keys
, pca_projection_clusters[cluster_id].values
, pca_projection_clusters[cluster_id].nnz
, vector_lengths_pca_samples[sample_id]
, vector_lengths_pca_clusters[cluster_id]);
} else {
/* evaluate pca approximation. feature mapping is done on demand */
if (pca_projection.keys == NULL) {
vector_matrix_dot(pca_projection_samples[sample_id].keys,
pca_projection_samples[sample_id].values,
pca_projection_samples[sample_id].nnz,
prms->ext_vects,
&pca_projection);
}
dist = euclid_vector(pca_projection.keys, pca_projection.values, pca_projection.nnz
, pca_projection_clusters[cluster_id].keys
, pca_projection_clusters[cluster_id].values
, pca_projection_clusters[cluster_id].nnz
, ctx.vector_lengths_samples[sample_id]
, ctx.vector_lengths_clusters[cluster_id]);
}
done_pca_calcs += 1;
if (dist >= ctx.cluster_distances[sample_id] && fabs(dist - ctx.cluster_distances[sample_id]) >= 1e-6) {
/* approximated distance is larger than current best distance. skip full distance calculation */
saved_calculations_pca += 1;
goto end;
}
}
/* printf("Approximated dist = %.4f - %.4f", dist, ctx.cluster_distances[sample_id]); */
/* if we reached this point we need to calculate a full euclidean distance */
dist = euclid_vector_list(ctx.samples, sample_id, ctx.cluster_vectors, cluster_id
, ctx.vector_lengths_samples, ctx.vector_lengths_clusters);
/* printf("actual dist = %.4f\n", dist); */
ctx.done_calculations += 1;
if (dist < ctx.cluster_distances[sample_id]) {
/* replace current best distance with new distance */
ctx.cluster_distances[sample_id] = dist;
ctx.cluster_assignments[sample_id] = cluster_id;
}
end:;
}
}
if (!disable_optimizations) {
free_null(pca_projection.keys);
free_null(pca_projection.values);
}
}
post_process_iteration(&ctx, prms);
/* shift clusters to new position */
calculate_shifted_clusters(&ctx);
switch_to_shifted_clusters(&ctx);
if (!disable_optimizations) {
/* update only projections for cluster that shifted */
update_dot_products(ctx.cluster_vectors,
ctx.no_clusters,
prms->ext_vects,
ctx.clusters_not_changed,
pca_projection_clusters);
d_add_ilist(&(prms->tr), "iteration_pca_calcs", done_pca_calcs);
d_add_ilist(&(prms->tr), "iteration_pca_calcs_success", saved_calculations_pca + saved_calculations_cauchy);
#pragma omp parallel for
for (j = 0; j < ctx.samples->sample_count; j++) {
/* iterate over all samples */
VALUE_TYPE previous_distance;
previous_distance = ctx.cluster_distances[j];
/* if the cluster did move. calculate the new distance to this sample */
if (ctx.clusters_not_changed[ctx.cluster_assignments[j]] == 0) {
ctx.cluster_distances[j]
= euclid_vector_list(ctx.samples, j
, ctx.cluster_vectors, ctx.cluster_assignments[j]
, ctx.vector_lengths_samples
, ctx.vector_lengths_clusters);
/*#pragma omp critical*/
ctx.done_calculations += 1;
ctx.total_no_calcs += 1;
}
/* if the cluster moved towards this sample,
* then this sample is eligible to skip calculations to centers which
* did not move in the last iteration
*/
if (ctx.cluster_distances[j] <= previous_distance) {
eligible_for_cluster_no_change_optimization[j] = 1;
} else {
eligible_for_cluster_no_change_optimization[j] = 0;
}
}
} else {
/* naive k-means without any optimization remembers nothing from
* the previous iteration.
*/
for (j = 0; j < ctx.samples->sample_count; j++) {
ctx.cluster_distances[j] = DBL_MAX;
}
}
print_iteration_summary(&ctx, prms, i);
/* print projection statistics */
if (prms->verbose) LOG_INFO("PCA statistics c:%" PRINTF_INT64_MODIFIER "u/b:%" PRINTF_INT64_MODIFIER "u/db:%" PRINTF_INT64_MODIFIER "u/pc:%" PRINTF_INT64_MODIFIER "u"
, saved_calculations_cauchy
, saved_calculations_pca
, done_pca_calcs
, saved_calculations_prev_cluster);
}
if (prms->verbose) LOG_INFO("total total_no_calcs = %" PRINTF_INT64_MODIFIER "u", ctx.total_no_calcs);
res = create_kmeans_result(prms, &ctx);
/* cleanup all */
if (!disable_optimizations) {
if (prms->kmeans_algorithm_id == ALGORITHM_PCA_KMEANS) {
free_vector_list(pca_projection_samples, samples->sample_count);
free(vector_lengths_pca_samples);
free(pca_projection_samples);
}
free_vector_list(pca_projection_clusters, ctx.no_clusters);
free(pca_projection_clusters);
free(vector_lengths_pca_clusters);
}
free_general_context(&ctx, prms);
free_null(eligible_for_cluster_no_change_optimization);
return res;
}
|
li_rudy_2011.c | #include "li_rudy_2011.h"
// TODO: Move this to a function and remember that each cell must have this variables ...
// Do the same for the GPU code ...
//real ical, camkactive;
//real qtr1,qtr2;
//real qrel1, qrel2, qup2;
GET_CELL_MODEL_DATA(init_cell_model_data) {
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
static bool first_call = true;
if(first_call) {
#ifdef _WIN32
printf("Using Li & Rudy 2011 CPU model\n");
#else
print_to_stdout_and_file("Using Li & Rudy 2011 CPU model\n");
#endif
first_call = false;
}
sv[0] = -84.058830; // V millivolt
sv[1] = 0.000821; // m dimensionless
sv[2] = 0.995741; // h dimensionless
sv[3] = 0.999872; // j dimensionless
sv[4] = 0.000016; // d dimensionless
sv[5] = 0.999193; // f dimensionless
sv[6] = 0.988692; // f2 dimensionless
sv[7] = 0.965405; // fca dimensionless
sv[8] = 0.739378; // fca2 dimensionless
sv[9] = 0.001114; // xs1 dimensionless
sv[10] = 0.042234; // xs2 dimensionless
sv[11] = 0.069808; // xr dimensionless
sv[12] = 0.000119; // a dimensionless
sv[13] = 0.992541; // i dimensionless
sv[14] = 0.745628; // i2 dimensionless
sv[15] = 0.000329; // ml dimensionless
sv[16] = 0.046538; // ml3 dimensionless
sv[17] = 0.984170; // hl dimensionless
sv[18] = 0.853893; // hl3 dimensionless
sv[19] = 0.912569; // jl dimensionless
sv[20] = 0.827885; // jl3 dimensionless
sv[21] = 0.000135; // casss dimensionless
sv[22] = 1.510741; // cajsr dimensionless
sv[23] = 1.537577; // cacsr dimensionless
sv[24] = 1.538668; // cansr dimensionless
sv[25] = 0.000130; // cassl dimensionless
sv[26] = 11.501546; // nai dimensionless
sv[27] = 11.501230; // nassl dimensionless
sv[28] = 11.501240; // nasss dimensionless
sv[29] = 136.422946; // ki dimensionless
sv[30] = 0.000053; // cai millimolar
sv[31] = 0.000437; // b dimensionless
sv[32] = 0.990384; // g dimensionless
sv[33] = 0.535627; // u dimensionless
sv[34] = 0.182859; // y dimensionless
sv[35] = 0.010600; // camktrap dimensionless
// Additional parameters
sv[36] = 0.0; // ical
sv[37] = 0.0; // camkactive
sv[38] = 0.0; // qrel1
sv[39] = 0.0; // qrel2
sv[40] = 0.0; // qup2
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
real rY[NEQ], rDY[NEQ];
// Save odl value of the state vector
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
// Compute Right-hand-side of the ODE's
RHS_cpu(rY, rDY, dt, stim_current);
// Solve the ODE's using a mix between Forward Euler and Rush-Larsen
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu(const real *sv, real *rDY_, real dt, real stim_current) {
//const double dtmin = 0.001;
//const double dtmed = 0.005;
//const double dtmax = 0.1;
real v_old = sv[0];
real m_old = sv[1];
real h_old = sv[2];
real j_old = sv[3];
real d_old = sv[4];
real f_old = sv[5];
real f2_old = sv[6];
real fca_old = sv[7];
real fca2_old = sv[8];
real xs1_old = sv[9];
real xs2_old = sv[10];
real xr_old = sv[11];
real a_old = sv[12];
real i_old = sv[13];
real i2_old = sv[14];
real ml_old = sv[15];
real ml3_old = sv[16];
real hl_old = sv[17];
real hl3_old = sv[18];
real jl_old = sv[19];
real jl3_old = sv[20];
real casss_old = sv[21];
real cajsr_old = sv[22];
real cacsr_old = sv[23];
real cansr_old = sv[24];
real cassl_old = sv[25];
real nai_old = sv[26];
real nassl_old = sv[27];
real nasss_old = sv[28];
real ki_old = sv[29];
real cai_old = sv[30];
real b_old = sv[31];
real g_old = sv[32];
real u_old = sv[33];
real y_old = sv[34];
real camktrap_old = sv[35];
real ical = sv[36];
real camkactive = sv[37];
real qrel1 = sv[38];
real qrel2 = sv[39];
real qup2 = sv[40];
// Parameters
// CELL GEOMETRY
const real pi = 3.14;
const real radius = 0.00175;
const real length = 0.0164;
const real rcg = 1.54;
const real vcell = 1000*pi*radius*radius*length;
const real ageo = 2*pi*radius*radius + 2*pi*radius*length;
const real acap = rcg*ageo;
const real vmyo = vcell * 0.60;
const real vnsr = vcell * 0.04;
//const real vmito = vcell * 0.18;
const real vjsr = vcell * 0.002;
const real vcsr = vcell * 0.008;
const real vsss = vcell * 0.02;
const real vssl = vcell * 0.15;
// PHYSICAL CONSTANTS
const real frdy = 96485;
const real R = 8314;
const real temp = 310;
const real nao = 140;
const real cao = 1.8;
const real ko = 5.4;
//const real clo = 100;
const real zna = 1;
const real zk = 1;
//const real zcl = -1;
const real zca = 2;
//const real ganai = 0.75;
//const real ganao = 0.75;
//const real gaki = 0.75;
//const real gako = 0.75;
const real gacai = 1.0;
const real gacao = 0.341;
// CAMKII DYNAMICS
const real camk0 = 0.05;
const real alphacamk = 0.05;
const real betacamk = 0.00068;
const real kmcam = 0.0015;
const real kmcamk = 0.15;
//const real fca_dtaucamkbar = 10.0;
// MEMBRANE IONIC CURRENTS
const real gna = 18;
const real gnal2 = 0.052;
const real gnal3 = 0.018;
const real pca = 1.9926e-4;
//const real powtau = 10;
const real gcat = 0.07875;
const real gtos = 0.1414;
const real gtof = 0.042;
const real prnak = 0.014;
//const real gnab = 0.0025;
const real pcab = 3.99e-8;
const real pnab = 0.64e-8;
const real inacamax = 2.52;
const real kmcaact = 0.000125;
const real kmnai1 = 12.3;
const real kmnao = 87.5;
const real kmcai = 0.0036;
const real kmcao = 1.3;
const real nu = 0.35;
const real ksat = 0.27;
const real ibarnak = 1.1004;
const real ipcabar = 0.0115;
const real kmpca = 0.0005;
// CALCIUM FLUXES AND CONCENTRATIONS
const real IP3 = 0.0001;
const real k1 = 150000;
const real k1a = 16.5;
const real k0 = 96000;
const real k0a = 9.6;
const real k2 = 1800;
const real k2a = 0.21;
const real tauip3r = 3.7;
const real dqupcamkbar = 0.75;
const real dkmplbbar = 0.00017;
const real kmup = 0.00028;
const real nsrbar = 15.0;
const real bsrbar = 0.019975;
const real kmbsr = 0.00087;
const real bslbar = 0.4777;
const real kmbsl = 0.0087;
const real csqnbar = 2.88;
const real kmcsqn = 0.8;
const real cmdnbar = 0.1125;
const real kmcmdn = 2.38e-3;
const real trpnbar = 3.15e-2;
const real kmtrpn = 0.5e-3;
const real trpnbar1 = 3.5e-3;
const real cmdnbar1 = 1.25e-2;
const real csqnbar1 = 1.2;
// CALCIUM FLUXES RATE CONSTANTS
const real tautr1 = 120;
const real tautr2 = 120;
const real gaptau = 12;
const real sstau = 0.2;
// comp_revs()
real eca = (R*temp/(zca*frdy))*log(cao/cassl_old);
real ena = (R*temp/frdy)*log(nao/nassl_old);
real ek = (R*temp/frdy)*log(ko/ki_old);
// comp_ina()
real ma = 0.64*(v_old+37.13)/(1-exp(-0.1*(v_old+37.13)));
real mb = 0.16*exp(-v_old/11);
real ha, hb, ja, jb;
if (v_old<-40)
{
ha = 0.135*exp((70+v_old)/-6.8);
hb = 3.56*exp(0.079*v_old)+310000*exp(0.35*v_old);
ja = (-127140*exp(0.2444*v_old)-0.003474*exp(-0.04391*v_old))*(v_old+37.78)/(1+exp(0.311*(v_old+79.23)));
jb = 0.1212*exp(-0.01052*v_old)/(1+exp(-0.1378*(v_old+40.14)));
}
else
{
ha = 0.0;
hb = 1/(0.13*(1+exp((v_old+10.66)/-11.1)));
ja = 0.0;
jb = 0.3*exp(-0.0000002535*v_old)/(1+exp(-0.1*(v_old+32)));
}
real mtau = 1/(ma+mb);
real htau = 1/(ha + hb);
real jtau = 1/(ja+jb);
real mss = ma*mtau;
real hss = ha*htau;
real jss = 1*ja*jtau;
// Rush-Larsen
m_old = mss-(mss-m_old)*exp(-dt/mtau);
h_old = hss-(hss-h_old)*exp(-dt/htau);
j_old = jss-(jss-j_old)*exp(-dt/jtau);
real ina = gna*pow(m_old,3)*h_old*j_old*(v_old-ena);
// comp_inal()
real mltau = 1/(0.64*(v_old+37.13)/(1-exp(-0.1*(v_old+37.13))) + 0.16*exp(-v_old/11));
real ml3tau = mltau;
real mlss = 1/(1+exp(-(v_old+28)/7));
real ml3ss = 1/(1+exp(-(v_old+63)/7));
real hltau = 162+132/(1+exp(-(v_old+28)/5.5));
real hl3tau = 0.5*hltau;
real hlss = 1/(1+exp((v_old+28)/12));
real hl3ss = 1/(1+exp((v_old+63)/12));
real jltau = 411;
real jl3tau = 0.5*jltau;
real jlss = hlss;
real jl3ss = hl3ss;
// Rush-Larsen
ml_old = mlss-(mlss-ml_old)*exp(-dt/mltau);
ml3_old = ml3ss-(ml3ss-ml3_old)*exp(-dt/ml3tau);
hl_old = hlss-(hlss-hl_old)*exp(-dt/hltau);
hl3_old = hl3ss-(hl3ss-hl3_old)*exp(-dt/hl3tau);
jl_old = jlss-(jlss-jl_old)*exp(-dt/jltau);
jl3_old = jl3ss-(jl3ss-jl3_old)*exp(-dt/jl3tau);
real inal2 = gnal2*ml_old*hl_old*jl_old*(v_old-ena);
real inal3 = gnal3*ml3_old*hl3_old*jl3_old*(v_old-ena);
real inal = inal2 + inal3;
// comp_inab()
real inab = pnab*frdy*((frdy*v_old)/(R*temp))*(nassl_old*exp((frdy*v_old)/(R*temp)) - nao)/(exp((frdy*v_old)/(R*temp))-1);
// comp_ical()
real ibarca = pca*zca*zca*(((v_old-15)*frdy*frdy)/(R*temp))*((gacai*casss_old*exp((zca*(v_old-15)*frdy)/(R*temp))-gacao*cao)/(exp((zca*(v_old-15)*frdy)/(R*temp))-1));
real dss = (1/(1.0+exp(-(v_old-2.0)/7.8)));
real dtau = (0.59+0.8*exp(0.052*(v_old+13))/(1+exp(0.132*(v_old+13))));
real fss = 1/(1.0 + exp((v_old+16.5)/9.5));
real ftau = 0.92/(0.125*exp(-(0.058*(v_old-2.5))*(0.045*(v_old-2.5)))+0.1);
real f2ss = fss;
real f2tau = 0.90/(0.02*exp(-(0.04*(v_old-18.6))*(0.045*(v_old-18.6)))+0.005);
real fcass = 0.3/(1 - ical/0.05) + 0.55/(1.0+casss_old/0.003)+0.15;
real fcatau = 10*camkactive/(camkactive+kmcam) + 0.5+1/(1.0+casss_old/0.003);
real fca2ss = 1.0/(1.0-ical/0.01);
real fca2tau = 1*(300.0/(1.0+exp((-ical-0.175)/0.04))+125.0);
// Rush-Larsen
d_old = dss-(dss-d_old)*exp(-dt/dtau);
f_old = fss-(fss-f_old)*exp(-dt/ftau);
f2_old = f2ss-(f2ss-f2_old)*exp(-dt/f2tau);
fca_old = fcass-(fcass-fca_old)*exp(-dt/fcatau);
fca2_old = fca2ss-(fca2ss-fca2_old)*exp(-dt/fca2tau);
ical = d_old*f_old*f2_old*fca_old*fca2_old*ibarca;
// comp_icat()
real bss = 1/(1+ exp (-(v_old+30)/7));
real gss = 1/(1+exp((v_old+61)/5));
real taub = 1/(1.068*exp((v_old+16.3)/30)+1.068*exp(-(v_old+16.3)/30));
real taug = 1/(0.015*exp(-(v_old+71.7)/83.3)+0.015*exp((v_old+71.7)/15.4));
// Rush-Larsen
b_old = bss-(bss-b_old)*exp(-dt/taub);
g_old = gss-(gss-g_old)*exp(-dt/taug);
real icat = gcat*b_old*g_old*(v_old-eca);
// comp_icab()
real icab = pcab*zca*zca*((v_old*frdy*frdy)/(R*temp))*((gacai*cassl_old*exp((zca*v_old*frdy)/(R*temp))-gacao*cao)/(exp((zca*v_old*frdy)/(R*temp))-1));
// comp_itol()
real atau = 1/(25*exp((v_old-82)/18)/(1+exp((v_old-82)/18))+25*exp(-(v_old+52)/18)/(1+exp(-(v_old+52)/18)));
real itau = 2.86+ 1/(exp(-(v_old+125)/15)*0.1 + 0.1*exp((v_old+2)/26.5));
real i2tau = 21.5+ 1/(exp(-(v_old+138.2)/52)*0.005 + 0.003*exp((v_old+18)/12.5));
real ass = 1/(1+exp(-(v_old-8.9)/10.3));
real iss = 1/(1+exp((v_old+30)/11));
real i2ss = iss;
// Rush-Larsen
a_old = ass-(ass-a_old)*exp(-dt/atau);
i_old = iss-(iss-i_old)*exp(-dt/itau);
i2_old = i2ss-(i2ss-i2_old)*exp(-dt/i2tau);
real itos = gtos*a_old*i_old*i2_old*(v_old-ek);
real itof = gtof*(v_old-ek)/(1+exp(-(v_old-3)/19.8));
real ito1 = itos + itof;
// comp_ikr()
real gkr = 0.0326*sqrt(ko/5.4);
real xrss = 1/(1+exp(-(v_old)/15));
real xrtau = 400.0/(1.0+exp(v_old/10.0)) + 100.0;
real rkr = 1/(1+exp((v_old)/35));
// Rush-Larsen
xr_old = xrss-(xrss-xr_old)*exp(-dt/xrtau);
real ikr = gkr*xr_old*rkr*(v_old-ek);
// comp_iks()
real eks = (R*temp/frdy)*log((ko+prnak*nao)/(ki_old+prnak*nassl_old));
real gks = 0.053*(1+0.6/(1+pow((0.000038/cassl_old),1.4)));
real xsss = 1/(1+exp(-(v_old-9)/13.7));
real xs1tau = 200/(exp(-(v_old+10)/6) + exp((v_old-62)/55));
real xs2tau = 1500+ 350/(exp(-(v_old+10)/4) + exp((v_old-90)/58));
// Rush-Larsen
xs1_old = xsss-(xsss-xs1_old)*exp(-dt/xs1tau);
xs2_old = xsss-(xsss-xs2_old)*exp(-dt/xs2tau);
real iks = gks*xs1_old*xs2_old*(v_old-eks);
// comp_ik1()
real k1ss = 1/(1+exp((v_old+103-(2.9+ko*2.175))/10.15));
real gk1 = 0.12*sqrt(ko);
real ik1 = gk1*k1ss*(v_old-ek);
// comp_inaca()
real allo = 1/(1+pow((kmcaact/(1.5*casss_old)),2));
real num = inacamax*(pow(nasss_old,3)*cao*exp(nu*v_old*frdy/(R*temp))-pow(nao,3)*1.5*casss_old*exp((nu-1)*v_old*frdy/(R*temp)));
real denommult = 1+ksat*exp((nu-1)*v_old*frdy/(R*temp));
real denomterm1 = kmcao*pow(nasss_old,3)+pow(kmnao,3)*1.5*casss_old+pow(kmnai1,3)*cao*(1+1.5*casss_old/kmcai);
real denomterm2 = kmcai*pow(nao,3)*(1+pow(nasss_old/kmnai1,3))+pow(nasss_old,3)*cao+pow(nao,3)*1.5*casss_old;
real deltaE = num/(denommult*(denomterm1+denomterm2));
real inacass = 0.2*allo*deltaE;
allo = 1/(1+pow((kmcaact/(1.5*cassl_old)),2));
num = inacamax*(pow(nassl_old,3)*cao*exp(nu*v_old*frdy/(R*temp))-pow(nao,3)*1.5*cassl_old*exp((nu-1)*v_old*frdy/(R*temp)));
denommult = 1+ksat*exp((nu-1)*v_old*frdy/(R*temp));
denomterm1 = kmcao*pow(nassl_old,3)+pow(kmnao,3)*1.5*cassl_old+pow(kmnai1,3)*cao*(1+1.5*cassl_old/kmcai);
denomterm2 = kmcai*pow(nao,3)*(1+pow(nassl_old/kmnai1,3))+pow(nassl_old,3)*cao+pow(nao,3)*1.5*cassl_old;
deltaE = num/(denommult*(denomterm1+denomterm2));
real inaca = 0.8*allo*deltaE;
// comp_inak()
real inak = ibarnak*(1/(1+exp(-1*(v_old+92)*frdy/(R*temp))))*pow((nassl_old/(nassl_old+2.6)),3)*(ko/(ko+0.8));
// comp_ipca()
real ipca = ipcabar/((kmpca/cassl_old)+1);
// comp_if()
real yss = 1/(1+exp((v_old+87)/9.5));
real ytau = 2000/(exp(-(v_old+132)/10) + exp((v_old+57)/60));
// Rush-Larsen
y_old = yss - (yss-y_old)*exp(-dt/ytau);
real ifna = 0.012*y_old*y_old*(v_old-ena);
real ifk = 0.024*y_old*y_old*(v_old-ek);
//real iftotal = ifna + ifk;
// comp_istim()
real istim = stim_current;
// comp_itot()
real icatot = ical+icat+ipca+icab-2*inaca-2*inacass;
real iktot = ikr+iks+ik1-2*inak+ito1+ifk+1*istim;
real inatot = 3*inak+ina+3*inaca+3*inacass+inal+ifna+inab;
real itot = icatot+iktot+inatot;
// comp_ip3()
// Forward Euler
real du = dt*(casss_old*k2*(1-u_old) - k2a*u_old);
u_old += du;
real POip3 = tauip3r*IP3*casss_old*(1-u_old)/((1+IP3*k0/k0a)*(1+casss_old*k1/k1a));
real qip3 = 10.920*(cajsr_old-casss_old)*(POip3);
// comp_qrel1()
real qdiff = (casss_old-cassl_old)/sstau;
real REL = -((ical)*acap/(vsss*2.0*frdy) - (qrel1 + qip3)*vjsr/vsss + qdiff);
real ireltau = 2*(1+1*(1/(1+pow((0.28/camkactive),8))))/(1+(0.0123/cajsr_old));
real irelss;
if (REL > 0)
irelss = 15*(1+1*(1/(1+pow((0.28/camkactive),8))))*REL/(1 + pow((1.0/cajsr_old),8));
else
irelss = 0;
// Forward Euler
qrel1 += dt*((irelss-qrel1)/ireltau);
// comp_qrel2()
real qgap = (cassl_old-cai_old)/gaptau;
REL = (-qup2*vnsr/vmyo + qgap*vssl/vmyo+ (qrel2)*vcsr/vmyo);
ireltau = 6*(1+1*(1/(1+pow((0.28/camkactive),8))))/(1+(0.0123/cacsr_old));
if (REL > 0)
irelss = 91*(1+1*(1/(1+pow((0.28/camkactive),8))))*(REL)/(1 + pow((1/cacsr_old),8));
else
irelss = 0;
// Forward Euler
qrel2 += dt*((irelss-qrel2)/ireltau);
// comp_qup1()
real dkmplb = dkmplbbar*camkactive/(kmcamk+camkactive);
real dqupcamk = dqupcamkbar*camkactive/(kmcamk+camkactive);
real qup1 = 0.0002*(dqupcamk+1)/(1+pow((kmup-dkmplb)/cassl_old,1))-0.00105*cansr_old/nsrbar;
dkmplb = dkmplbbar*camkactive/(kmcamk+camkactive);
dqupcamk = dqupcamkbar*camkactive/(kmcamk+camkactive);
qup2 = 0.0026*(dqupcamk+1)/(1+pow((kmup-dkmplb)/cai_old,1))-0.0042*cansr_old/nsrbar;
// comp_qtr1()
real qtr1 = (cansr_old-cajsr_old)/tautr1;
// comp_qtr2()
real qtr2 = (cansr_old-cacsr_old)/tautr2;
// comp_conc()
qdiff = (casss_old-cassl_old)/sstau;
qgap = (cassl_old-cai_old)/gaptau;
real qdiffna = (nasss_old-nassl_old)/sstau;
real qgapna = (nassl_old-nai_old)/gaptau;
// Forward Euler
real dcasss = dt*(-(ical-2*inacass)*acap/(vsss*2.0*frdy)+(qrel1+qip3)*vjsr/vsss-qdiff);
real bsss = 1/(1+(bsrbar*kmbsr/pow(kmbsr+casss_old,2))+(bslbar*kmbsl/pow(kmbsl+casss_old,2)));
casss_old += bsss*dcasss;
// Forward Euler
real dcassl = dt*(-(qup1)*vnsr/vssl+qdiff*vsss/vssl-qgap-(icat+ipca+icab-2*inaca)*acap/(vssl*2.0*frdy));
real trpn = trpnbar1*(cassl_old/(cassl_old+kmtrpn));
real cmdn = cmdnbar1*(cassl_old/(cassl_old+kmcmdn));
real catotal = trpn+cmdn+dcassl+cassl_old;
real bmyo = cmdnbar1+trpnbar1-catotal+kmtrpn+kmcmdn;
real cmyo = kmcmdn*kmtrpn-catotal*(kmtrpn+kmcmdn)+(trpnbar1*kmcmdn)+cmdnbar1*kmtrpn;
real dmyo = -kmtrpn*kmcmdn*catotal;
cassl_old = (2.0/3.0)*sqrt(bmyo*bmyo-3.0*cmyo)*cos(acos((9.0*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2.0*pow((bmyo*bmyo-3.0*cmyo),1.5)))/3.0)-bmyo/3.0;
real dcajsr = dt*(qtr1-qrel1-qip3);
real csqn1 = csqnbar1*(cajsr_old/(cajsr_old+kmcsqn));
real bjsr = csqnbar1 - csqn1-cajsr_old-dcajsr+kmcsqn;
real cjsr = kmcsqn*(csqn1+cajsr_old+dcajsr);
cajsr_old = (sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2;
real dcacsr = dt*(qtr2-qrel2);
real csqn = csqnbar*(cacsr_old/(cacsr_old+kmcsqn));
real bcsr = csqnbar - csqn-cacsr_old-dcacsr+kmcsqn;
real ccsr = kmcsqn*(csqn+cacsr_old+dcacsr);
cacsr_old = (sqrt(bcsr*bcsr+4*ccsr)-bcsr)/2;
// Forward Euler
real dcansr = dt*(qup1+qup2-qtr1*vjsr/vnsr-qtr2*vcsr/vnsr);
cansr_old += dcansr;
// Forward Euler
real dnasss = dt*((-(3*inacass)*acap)/((vsss)*zna*frdy)-qdiffna);
nasss_old += dnasss;
// Forward Euler
real dnassl = dt*((-(3*inak+ina+inal+3*inaca+ifna+inab)*acap)/((vssl)*zna*frdy)+qdiffna*vsss/vssl-qgapna);
nassl_old += dnassl;
// Forward Euler
real dnai = dt*(qgapna*vssl/vmyo);
nai_old += dnai;
// Forward Euler
real dki = dt*((-iktot*acap)/((vmyo+vssl+vsss)*zk*frdy));
ki_old += dki;
// Forward Euler
real dcai = dt*(-(qup2)*vnsr/vmyo+qgap*vssl/vmyo+(qrel2)*vcsr/vmyo);
trpn = trpnbar*(cai_old/(cai_old+kmtrpn));
cmdn = cmdnbar*(cai_old/(cai_old+kmcmdn));
catotal = trpn+cmdn+dcai+cai_old;
bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn;
cmyo = kmcmdn*kmtrpn-catotal*(kmtrpn+kmcmdn)+(trpnbar*kmcmdn)+cmdnbar*kmtrpn;
dmyo = -kmtrpn*kmcmdn*catotal;
cai_old = (2.0/3.0)*sqrt(bmyo*bmyo-3.0*cmyo)*cos(acos((9.0*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2.0*pow((bmyo*bmyo-3.0*cmyo),1.5)))/3.0)-bmyo/3.0;
// Unused ...
//real caavg = (casss_old*vsss+cassl*vssl+cai_old*vmyo)/(vsss+vmyo+vssl);
real camkbound = camk0*(1-camktrap_old)*1/(1+(kmcam/casss_old));
// Forward Euler
camktrap_old = dt*(alphacamk*camkbound*(camkbound+camktrap_old)-betacamk*camktrap_old) + camktrap_old;
camkactive = camkbound+camktrap_old;
real dvdt = -itot;
v_old += dt*dvdt;
// Rush-Larsen
rDY_[1] = m_old;
rDY_[2] = h_old;
rDY_[3] = j_old;
rDY_[4] = d_old;
rDY_[5] = f_old;
rDY_[6] = f2_old;
rDY_[7] = fca_old;
rDY_[8] = fca2_old;
rDY_[9] = xs1_old;
rDY_[10] = xs2_old;
rDY_[11] = xr_old;
rDY_[12] = a_old;
rDY_[13] = i_old;
rDY_[14] = i2_old;
rDY_[15] = ml_old;
rDY_[16] = ml3_old;
rDY_[17] = hl_old;
rDY_[18] = hl3_old;
rDY_[19] = jl_old;
rDY_[20] = jl3_old;
rDY_[31] = b_old;
rDY_[32] = g_old;
rDY_[34] = y_old;
// Forward Euler (I already calculated the Forward Euler scheme here ...)
rDY_[0] = v_old;
rDY_[21] = casss_old;
rDY_[22] = cajsr_old;
rDY_[23] = cacsr_old;
rDY_[24] = cansr_old;
rDY_[25] = cassl_old;
rDY_[26] = nai_old;
rDY_[27] = nassl_old;
rDY_[28] = nasss_old;
rDY_[29] = ki_old;
rDY_[30] = cai_old;
rDY_[33] = u_old;
rDY_[35] = camktrap_old;
rDY_[36] = ical;
rDY_[37] = camkactive;
rDY_[38] = qrel1;
rDY_[39] = qrel2;
rDY_[40] = qup2;
} |
Example_tasking.12.c | /*
* @@name: tasking.12c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: rt-error
* @@version: omp_3.1
*/
#include <stdio.h>
void foo ( )
{
int x = 2;
#pragma omp task mergeable
{
x++;
}
#pragma omp taskwait
printf("%d\n",x); // prints 2 or 3
}
|
GB_unop__identity_int64_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_int8)
// op(A') function: GB (_unop_tran__identity_int64_int8)
// C type: int64_t
// A type: int8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_int8)
(
int64_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr26943-4.c | /* PR c++/26943 */
/* { dg-do run } */
extern int omp_set_dynamic (int);
extern int omp_get_thread_num (void);
extern void abort (void);
int a = 8, b = 12, c = 16, d = 20, j = 0, l = 0;
char e[10] = "a", f[10] = "b", g[10] = "c", h[10] = "d";
volatile int k;
int
main (void)
{
int i;
omp_set_dynamic (0);
omp_set_nested (1);
#pragma omp parallel num_threads (2) reduction (+:l) \
firstprivate (a, b, c, d, e, f, g, h, j)
if (k == omp_get_thread_num ())
{
#pragma omp parallel for shared (a, e) firstprivate (b, f) \
lastprivate (c, g) private (d, h) \
schedule (static, 1) num_threads (4) \
reduction (+:j)
for (i = 0; i < 4; i++)
{
if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
j++;
#pragma omp barrier
#pragma omp atomic
a += i;
b += i;
c = i;
d = i;
#pragma omp atomic
e[0] += i;
f[0] += i;
g[0] = 'g' + i;
h[0] = 'h' + i;
#pragma omp barrier
if (a != 8 + 6 || b != 12 + i || c != i || d != i)
j += 8;
if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
j += 64;
if (h[0] != 'h' + i)
j += 512;
}
if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20)
++l;
if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd')
l += 8;
}
if (l)
abort ();
if (a != 8 || b != 12 || c != 16 || d != 20)
abort ();
if (e[0] != 'a' || f[0] != 'b' || g[0] != 'c' || h[0] != 'd')
abort ();
return 0;
}
|
color_transforms.h | /******************************************************************************
* Copyright (c) 2013 Aitor Aldoma
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
******************************************************************************/
#pragma once
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <v4r/core/macros.h>
#include <vector>
#include <omp.h>
namespace v4r
{
class V4R_EXPORTS ColorTransform
{
public:
typedef boost::shared_ptr< ColorTransform > Ptr;
virtual ~ColorTransform() {}
virtual
Eigen::VectorXf
do_conversion(unsigned char R, unsigned char G, unsigned char B) const = 0;
virtual void
do_inverse_conversion(const Eigen::VectorXf &converted_color, unsigned char &R, unsigned char &G, unsigned char &B) const
{
(void) converted_color;
(void) R;
(void) G;
(void) B;
std::cerr << "Inverse conversion is not implemented!" << std::endl;
}
virtual size_t getOutputNumColorCompenents() const = 0;
template<typename PointT>
V4R_EXPORTS void
convert(const pcl::PointCloud<PointT> &cloud, Eigen::MatrixXf &converted_color) const
{
converted_color = Eigen::MatrixXf (cloud.points.size(), getOutputNumColorCompenents());
#pragma omp parallel for schedule (dynamic)
for(size_t i=0; i < cloud.points.size(); i++)
{
const PointT &p = cloud.points[i];
unsigned char r = (unsigned char)p.r;
unsigned char g = (unsigned char)p.g;
unsigned char b = (unsigned char)p.b;
converted_color.row(i) = do_conversion( r, g, b);
}
}
};
class V4R_EXPORTS RGB2GrayScale : public ColorTransform
{
public:
typedef boost::shared_ptr< RGB2GrayScale > Ptr;
size_t getOutputNumColorCompenents() const { return 1; }
Eigen::VectorXf
do_conversion(unsigned char R, unsigned char G, unsigned char B) const
{
Eigen::VectorXf c(1);
c(0) = 0.2126f * R/255.f + 0.7152f * G/255.f + 0.0722f * B/255.f;
return c;
}
};
}
|
GB_binop__islt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__islt_uint8
// A.*B function (eWiseMult): GB_AemultB__islt_uint8
// A*D function (colscale): GB_AxD__islt_uint8
// D*A function (rowscale): GB_DxB__islt_uint8
// C+=B function (dense accum): GB_Cdense_accumB__islt_uint8
// C+=b function (dense accum): GB_Cdense_accumb__islt_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_uint8
// C=scalar+B GB_bind1st__islt_uint8
// C=scalar+B' GB_bind1st_tran__islt_uint8
// C=A+scalar GB_bind2nd__islt_uint8
// C=A'+scalar GB_bind2nd_tran__islt_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_UINT8 || GxB_NO_ISLT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__islt_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__islt_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__islt_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__islt_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__islt_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB_bind1st_tran__islt_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB_bind2nd_tran__islt_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_indirectIndex.c | // A loop with array references using indirect indexing
//
// Conventional parallelization algorithms will not parallelize the loop
// since indirect indexing may result in overlapped elements being accessed,
// which in turn introduces loop carried dependencies.
//
// However, if users can provide semantics that the indirect indexing will
// not result in overlapping elements (or unique elements), the loop can be parallelized.
//
// This is a simplified version based on code examples provided by Jeff Keasler.
//
// Liao, 5/12/2009
#define length 100
#include <omp.h>
double eps[100];
int zoneset[100];
void StressCheckEpsFail(double eps_failure_model)
{
int i;
int index;
#pragma omp parallel for private (index,i) firstprivate (eps_failure_model)
for (i = 0; i <= 99; i += 1) {
index = zoneset[i];
eps[zoneset[i]] = eps_failure_model * 1.01;
eps[zoneset[i]] = 1.01;
}
}
// a multi level definition chain
void StressCheckEpsFaili2(double eps_failure_model)
{
int i;
int index1;
#pragma omp parallel for private (index1,i) firstprivate (eps_failure_model)
for (i = 0; i <= 99; i += 1) {
index1 = zoneset[i];
int index2 = index1;
eps[zoneset[i]] = eps_failure_model * 1.01;
eps[zoneset[i]] = 1.01;
}
}
// a multi dimensional case
void foo()
{
int n = 100;
int m = 100;
double b[n][m];
int i;
int j;
int index;
int zoneset[m];
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (index,j)
for (j = 0; j <= m - 1; j += 1) {
index = zoneset[j];
b[i][zoneset[j]] = b[i - 1][index - 1];
}
}
}
|
cpd.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "cpd.h"
#include "matrix.h"
#include "mttkrp.h"
#include "timer.h"
#include "thd_info.h"
#include "util.h"
#include <math.h>
#include <omp.h>
#ifndef __AVX512F__
#define SPLATT_USE_DSYRK
#endif
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
int splatt_cpd_als(
splatt_csf const * const tensors,
splatt_idx_t const nfactors,
double const * const options,
splatt_kruskal * factored)
{
matrix_t * mats[MAX_NMODES+1];
idx_t nmodes = tensors->nmodes;
rank_info rinfo;
rinfo.rank = 0;
/* allocate factor matrices */
idx_t maxdim = tensors->dims[argmax_elem(tensors->dims, nmodes)];
for(idx_t m=0; m < nmodes; ++m) {
mats[m] = (matrix_t *) mat_rand(tensors[0].dims[m], nfactors);
}
mats[MAX_NMODES] = mat_alloc(maxdim, nfactors);
val_t * lambda = splatt_malloc(nfactors * sizeof(*lambda));
/* do the factorization! */
factored->fit = cpd_als_iterate(tensors, mats, lambda, nfactors, &rinfo,
options);
/* store output */
factored->rank = nfactors;
factored->nmodes = nmodes;
factored->lambda = lambda;
for(idx_t m=0; m < nmodes; ++m) {
factored->dims[m] = tensors->dims[m];
factored->factors[m] = mats[m]->vals;
}
/* clean up */
mat_free(mats[MAX_NMODES]);
for(idx_t m=0; m < nmodes; ++m) {
free(mats[m]); /* just the matrix_t ptr, data is safely in factored */
}
return SPLATT_SUCCESS;
}
void splatt_free_kruskal(
splatt_kruskal * factored)
{
free(factored->lambda);
for(idx_t m=0; m < factored->nmodes; ++m) {
#if SPLATT_MAT_HBW
splatt_hbw_free(factored->factors[m]);
#else
splatt_free(factored->factors[m]);
#endif
}
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Resets serial and MPI timers that were activated during some CPD
* pre-processing.
*
* @param rinfo MPI rank information.
*/
static void p_reset_cpd_timers(
rank_info const * const rinfo)
{
timer_reset(&timers[TIMER_ATA]);
#ifdef SPLATT_USE_MPI
timer_reset(&timers[TIMER_MPI]);
timer_reset(&timers[TIMER_MPI_IDLE]);
timer_reset(&timers[TIMER_MPI_COMM]);
timer_reset(&timers[TIMER_MPI_ATA]);
timer_reset(&timers[TIMER_MPI_REDUCE]);
timer_reset(&timers[TIMER_MPI_NORM]);
timer_reset(&timers[TIMER_MPI_UPDATE]);
timer_reset(&timers[TIMER_MPI_FIT]);
MPI_Barrier(rinfo->comm_3d);
#endif
}
/**
* @brief Find the Frobenius norm squared of a Kruskal tensor. This equivalent
* to via computing <X,X>, the inner product of X with itself. We find
* this via \lambda^T (AtA * BtB * ...) \lambda, where * is the Hadamard
* product.
*
* @param nmodes The number of modes in the tensor.
* @param lambda The vector of column norms.
* @param aTa An array of Gram Matrices (AtA, BtB, ...).
*
* @return The Frobenius norm of X, squared.
*/
static val_t p_kruskal_norm(
idx_t const nmodes,
val_t const * const restrict lambda,
matrix_t ** aTa)
{
idx_t const rank = aTa[0]->J;
val_t * const restrict av = aTa[MAX_NMODES]->vals;
val_t norm_mats = 0;
/* use aTa[MAX_NMODES] as scratch space */
for(idx_t x=0; x < rank*rank; ++x) {
av[x] = 1.;
}
/* aTa[MAX_NMODES] = hada(aTa) */
for(idx_t m=0; m < nmodes; ++m) {
val_t const * const restrict atavals = aTa[m]->vals;
for(idx_t x=0; x < rank*rank; ++x) {
av[x] *= atavals[x];
}
}
/* now compute lambda^T * aTa[MAX_NMODES] * lambda */
for(idx_t i=0; i < rank; ++i) {
for(idx_t j=0; j < rank; ++j) {
norm_mats += av[j+(i*rank)] * lambda[i] * lambda[j];
}
}
return fabs(norm_mats);
}
/**
* @brief Compute the inner product of a Kruskal tensor and an unfactored
* tensor. Assumes that 'm1' contains the MTTKRP result along the last
* mode of the two input tensors. This naturally follows the end of a
* CPD iteration.
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
*
* @return The inner product of the two tensors, computed via:
* 1^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_tt_kruskal_inner(
idx_t const nmodes,
rank_info * const rinfo,
thd_info * const thds,
val_t const * const restrict lambda,
matrix_t ** mats,
matrix_t const * const m1)
{
idx_t const rank = mats[0]->J;
idx_t const lastm = nmodes - 1;
idx_t const dim = m1->I;
val_t const * const m0 = mats[lastm]->vals;
val_t const * const mv = m1->vals;
val_t myinner = 0;
#pragma omp parallel reduction(+:myinner)
{
int const tid = omp_get_thread_num();
val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
for(idx_t r=0; r < rank; ++r) {
accumF[r] = 0.;
}
#pragma omp for
for(idx_t i=0; i < dim; ++i) {
for(idx_t r=0; r < rank; ++r) {
accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
}
}
/* accumulate everything into 'myinner' */
for(idx_t r=0; r < rank; ++r) {
myinner += accumF[r] * lambda[r];
}
}
val_t inner = 0.;
#ifdef SPLATT_USE_MPI
timer_start(&timers[TIMER_MPI_FIT]);
timer_start(&timers[TIMER_MPI_IDLE]);
MPI_Barrier(rinfo->comm_3d);
timer_stop(&timers[TIMER_MPI_IDLE]);
MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
timer_stop(&timers[TIMER_MPI_FIT]);
#else
inner = myinner;
#endif
return inner;
}
/**
* @brief Compute the fit of a Kruskal tensor, Z, to an input tensor, X. This
* is computed via 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param ttnormsq The norm (squared) of the original input tensor, <X,X>.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
* @param aTa An array of matrices (length MAX_NMODES)containing BtB, CtC, etc.
*
* @return The inner product of the two tensors, computed via:
* \lambda^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_calc_fit(
idx_t const nmodes,
rank_info * const rinfo,
thd_info * const thds,
val_t const ttnormsq,
val_t const * const restrict lambda,
matrix_t ** mats,
matrix_t const * const m1,
matrix_t ** aTa)
{
timer_start(&timers[TIMER_FIT]);
/* First get norm of new model: lambda^T * (hada aTa) * lambda. */
val_t const norm_mats = p_kruskal_norm(nmodes, lambda, aTa);
/* Compute inner product of tensor with new model */
val_t const inner = p_tt_kruskal_inner(nmodes, rinfo, thds, lambda, mats,m1);
val_t const residual = sqrt(ttnormsq + norm_mats - (2 * inner));
timer_stop(&timers[TIMER_FIT]);
return 1 - (residual / sqrt(ttnormsq));
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
double cpd_als_iterate(
splatt_csf const * const tensors,
matrix_t ** mats,
val_t * const lambda,
idx_t const nfactors,
rank_info * const rinfo,
double const * const opts)
{
idx_t const nmodes = tensors[0].nmodes;
idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
/* Compute thread-private scratch size for reduction */
idx_t reduction_scratch_size = 0;
for(int m=0; m < nmodes; ++m) {
splatt_csf_type which = (splatt_csf_type)opts[SPLATT_OPTION_CSF_ALLOC];
idx_t outdepth = MAX_NMODES;
switch(which) {
case SPLATT_CSF_ONEMODE:
outdepth = csf_mode_depth(m, tensors[0].dim_perm, nmodes);
if(outdepth > 0) {
if(mttkrp_use_privatization(tensors->nnz, mats[m]->I, opts)) {
printf("mode %d use privatization\n", m);
reduction_scratch_size = SS_MAX(reduction_scratch_size, mats[m]->I);
}
}
break;
case SPLATT_CSF_TWOMODE:
if(m != tensors[0].dim_perm[nmodes-1]) { /* longest mode handled via second tensor's root */
outdepth = csf_mode_depth(m, tensors[0].dim_perm, nmodes);
if(outdepth > 0) {
if(mttkrp_use_privatization(tensors->nnz, mats[m]->I, opts)) {
printf("mode %d use privatization\n", m);
reduction_scratch_size = SS_MAX(reduction_scratch_size, mats[m]->I);
}
}
}
break;
default:
break;
}
}
/* Setup thread structures. + 64 bytes is to avoid false sharing.
* TODO make this better */
omp_set_num_threads(nthreads);
thd_info * thds = thd_init(nthreads, 3,
(nfactors * nfactors * sizeof(val_t)) + 64,
(nfactors * reduction_scratch_size * sizeof(val_t)) + 64,
(nmodes * nfactors * sizeof(val_t)) + 64);
matrix_t * m1 = mats[MAX_NMODES];
/* Initialize first A^T * A mats. We redundantly do the first because it
* makes communication easier. */
matrix_t * aTa[MAX_NMODES+1];
for(idx_t m=0; m < nmodes; ++m) {
aTa[m] = mat_alloc(nfactors, nfactors);
mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
}
/* used as buffer space */
aTa[MAX_NMODES] = mat_alloc(nfactors, nfactors);
/* Compute input tensor norm */
double oldfit = 0;
double fit = 0;
val_t ttnormsq = csf_frobsq(tensors);
/* setup timers */
p_reset_cpd_timers(rinfo);
sp_timer_t itertime;
sp_timer_t modetime[MAX_NMODES];
timer_start(&timers[TIMER_CPD]);
idx_t const niters = (idx_t) opts[SPLATT_OPTION_NITER];
for(idx_t it=0; it < niters; ++it) {
timer_fstart(&itertime);
for(idx_t m=0; m < nmodes; ++m) {
timer_fstart(&modetime[m]);
mats[MAX_NMODES]->I = tensors[0].dims[m];
m1->I = mats[m]->I;
/* M1 = X * (C o B) */
timer_start(&timers[TIMER_MTTKRP]);
mttkrp_csf(tensors, mats, m, thds, opts);
timer_stop(&timers[TIMER_MTTKRP]);
#ifdef SPLATT_USE_DSYRK
par_memcpy(mats[m]->vals, m1->vals, m1->I * nfactors * sizeof(val_t));
mat_solve_normals(m, nmodes, aTa, mats[m], 0.);
#else
calc_gram_inv(m, nmodes, aTa);
/* A = M1 * M2 */
mat_matmul(m1, aTa[MAX_NMODES], mats[m]);
#endif
/* normalize columns and extract lambda */
if(it == 0) {
mat_normalize(mats[m], lambda, MAT_NORM_2, rinfo, thds, nthreads);
} else {
mat_normalize(mats[m], lambda, MAT_NORM_MAX, rinfo, thds,nthreads);
}
/* update A^T*A */
mat_aTa(mats[m], aTa[m], rinfo, thds, nthreads);
timer_stop(&modetime[m]);
} /* foreach mode */
fit = p_calc_fit(nmodes, rinfo, thds, ttnormsq, lambda, mats, m1, aTa);
timer_stop(&itertime);
if(rinfo->rank == 0 &&
opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
printf(" its = %3"SPLATT_PF_IDX" (%0.3fs) fit = %0.5f delta = %+0.4e\n",
it+1, itertime.seconds, fit, fit - oldfit);
if(opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_LOW) {
for(idx_t m=0; m < nmodes; ++m) {
printf(" mode = %1"SPLATT_PF_IDX" (%0.3fs)\n", m+1,
modetime[m].seconds);
}
}
}
if(it > 0 && fabs(fit - oldfit) < opts[SPLATT_OPTION_TOLERANCE]) {
break;
}
oldfit = fit;
}
timer_stop(&timers[TIMER_CPD]);
cpd_post_process(nfactors, nmodes, mats, lambda, thds, nthreads, rinfo);
/* CLEAN UP */
for(idx_t m=0; m < nmodes; ++m) {
mat_free(aTa[m]);
}
mat_free(aTa[MAX_NMODES]);
thd_free(thds, nthreads);
return fit;
}
void cpd_post_process(
idx_t const nfactors,
idx_t const nmodes,
matrix_t ** mats,
val_t * const lambda,
thd_info * const thds,
idx_t const nthreads,
rank_info * const rinfo)
{
val_t * tmp = splatt_malloc(nfactors * sizeof(*tmp));
/* normalize each matrix and adjust lambda */
for(idx_t m=0; m < nmodes; ++m) {
mat_normalize(mats[m], tmp, MAT_NORM_2, rinfo, thds, nthreads);
for(idx_t f=0; f < nfactors; ++f) {
lambda[f] *= tmp[f];
}
}
free(tmp);
}
|
SoftMax.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SoftMax.c"
#else
void THNN_(SoftMax_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output)
{
real *input_data, *output_data;
ptrdiff_t nframe = 0, dim = 0, stride = 0;
ptrdiff_t t;
if (input->nDimension == 1)
{
nframe = 1;
dim = input->size[0];
stride = 1;
}
else if (input->nDimension == 2)
{
nframe = input->size[0];
dim = input->size[1];
stride = 1;
}
else if (input->nDimension == 3)
{
nframe = 1;
dim = input->size[0];
stride = input->size[1]*input->size[2];
}
else if (input->nDimension == 4)
{
nframe = input->size[0];
dim = input->size[1];
stride = input->size[2]*input->size[3];
}
else
{
THArgCheck(0, 2, "1D, 2D, 3D or 4D tensor expected");
}
input = THTensor_(newContiguous)(input);
THTensor_(resizeAs)(output, input);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
#pragma omp parallel for private(t)
for (t = 0; t < stride*nframe; t++)
{
real *input_ptr = input_data + (t/stride)*dim*stride + t % stride;
real *output_ptr = output_data + (t/stride)*dim*stride + t % stride;
real inputMax = -THInf;
accreal sum;
ptrdiff_t d;
for (d = 0; d < dim; d++)
{
if (input_ptr[d*stride] >= inputMax) inputMax = input_ptr[d*stride];
}
sum = 0;
for (d = 0; d < dim; d++)
{
real z = exp(input_ptr[d*stride] - inputMax);
output_ptr[d*stride] = z;
sum += z;
}
for (d = 0; d < dim; d++)
{
output_ptr[d*stride] *= 1/sum;
}
}
THTensor_(free)(input);
}
void THNN_(SoftMax_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output)
{
THNN_CHECK_SHAPE(input, gradOutput);
real *gradInput_data, *gradOutput_data, *output_data;
ptrdiff_t nframe = 0, dim = 0, stride = 0;
ptrdiff_t t;
if (output->nDimension == 1)
{
nframe = 1;
dim = output->size[0];
stride = 1;
}
else if (output->nDimension == 2)
{
nframe = output->size[0];
dim = output->size[1];
stride = 1;
}
else if (output->nDimension == 3)
{
nframe = 1;
dim = output->size[0];
stride = output->size[1]*output->size[2];
}
else if (output->nDimension == 4)
{
nframe = output->size[0];
dim = output->size[1];
stride = output->size[2]*output->size[3];
}
else
{
THError("1D, 2D, 3D or 4D tensor expected");
}
gradOutput = THTensor_(newContiguous)(gradOutput);
output = THTensor_(newContiguous)(output);
THTensor_(resizeAs)(gradInput, output);
gradInput_data = THTensor_(data)(gradInput);
output_data = THTensor_(data)(output);
gradOutput_data = THTensor_(data)(gradOutput);
#pragma omp parallel for private(t)
for (t = 0; t < stride*nframe; t++)
{
real *gradInput_ptr = gradInput_data + (t/stride)*dim*stride + t % stride;
real *output_ptr = output_data + (t/stride)*dim*stride + t % stride;
real *gradOutput_ptr = gradOutput_data + (t/stride)*dim*stride + t % stride;
ptrdiff_t d;
accreal sum = 0;
for (d = 0; d < dim; d++)
sum += (accreal)gradOutput_ptr[d*stride] * output_ptr[d*stride];
for (d = 0; d < dim; d++)
gradInput_ptr[d*stride] = output_ptr[d*stride] * (gradOutput_ptr[d*stride] - sum);
}
THTensor_(free)(gradOutput);
THTensor_(free)(output);
}
#endif
|
epir_selector_factory.c |
#include "epir.h"
static inline int epir_selector_factory_ctx_init_(
epir_selector_factory_ctx *ctx,
const bool is_fast, const unsigned char *key, const uint32_t capacity_zero, const uint32_t capacity_one) {
ctx->is_fast = is_fast;
memcpy(ctx->key, key, 32);
ctx->capacities[0] = capacity_zero;
ctx->capacities[1] = capacity_one;
ctx->ciphers[0] = malloc(sizeof(unsigned char) * EPIR_CIPHER_SIZE * capacity_zero);
if(ctx->ciphers[0] == NULL) return -1;
ctx->ciphers[1] = malloc(sizeof(unsigned char) * EPIR_CIPHER_SIZE * capacity_one);
if(ctx->ciphers[1] == NULL) return -1;
ctx->idx[0] = ctx->idx[1] = -1;
int ret;
if((ret = pthread_mutex_init(&ctx->mutex, NULL)) != 0) return ret;
return 0;
}
int epir_selector_factory_ctx_init(
epir_selector_factory_ctx *ctx,
const unsigned char *pubkey, const uint32_t capacity_zero, const uint32_t capacity_one) {
return epir_selector_factory_ctx_init_(ctx, false, pubkey, capacity_zero, capacity_one);
}
int epir_selector_factory_ctx_init_fast(
epir_selector_factory_ctx *ctx,
const unsigned char *privkey, const uint32_t capacity_zero, const uint32_t capacity_one) {
return epir_selector_factory_ctx_init_(ctx, true, privkey, capacity_zero, capacity_one);
}
int epir_selector_factory_ctx_destroy(epir_selector_factory_ctx *ctx) {
free(ctx->ciphers[0]);
free(ctx->ciphers[1]);
int ret;
if((ret = pthread_mutex_destroy(&ctx->mutex)) != 0) return ret;
return 0;
}
int epir_selector_factory_fill_sync(epir_selector_factory_ctx *ctx) {
epir_ecelgamal_encrypt_fn *encrypt = ctx->is_fast ? epir_ecelgamal_encrypt_fast : epir_ecelgamal_encrypt;
int ret = 0;
for(size_t msg=0; msg<2; msg++) {
int32_t needs = ctx->capacities[msg] - ctx->idx[msg] - 1;
#pragma omp parallel for
for(int32_t i=0; i<needs; i++) {
unsigned char cipher[EPIR_CIPHER_SIZE];
encrypt(cipher, ctx->key, msg, NULL);
if(pthread_mutex_lock(&ctx->mutex) != 0) {
ret = 1;
continue;
}
const int32_t idx = ++ctx->idx[msg];
if(idx >= (int32_t)ctx->capacities[msg]) {
ret = 2;
continue;
}
memcpy(&ctx->ciphers[msg][idx * EPIR_CIPHER_SIZE], cipher, EPIR_CIPHER_SIZE);
if(pthread_mutex_unlock(&ctx->mutex) != 0) {
ret = 3;
continue;
}
}
}
return ret;
}
static void *epir_selector_factory_thread(void *ctx_) {
epir_selector_factory_ctx *ctx = ctx_;
epir_selector_factory_fill_sync(ctx);
return NULL;
}
int epir_selector_factory_fill(epir_selector_factory_ctx *ctx) {
int ret;
if((ret = pthread_create(&ctx->thread, NULL, epir_selector_factory_thread, ctx)) != 0) return ret;
return 0;
}
int epir_selector_factory_create_selector(
unsigned char *ciphers, epir_selector_factory_ctx *ctx,
const uint64_t *index_counts, const uint8_t n_indexes, const uint64_t idx) {
uint64_t n_ciphers = epir_selector_ciphers_count(index_counts, n_indexes);
epir_selector_create_choice(ciphers, EPIR_CIPHER_SIZE, index_counts, n_indexes, idx);
int ret;
if((ret = pthread_mutex_lock(&ctx->mutex)) != 0) return ret;
for(size_t i=0; i<n_ciphers; i++) {
uint8_t choice = ciphers[i * EPIR_CIPHER_SIZE];
if(ctx->idx[choice] < 0) return -1;
memcpy(&ciphers[i * EPIR_CIPHER_SIZE], &ctx->ciphers[choice][ctx->idx[choice] * EPIR_CIPHER_SIZE], EPIR_CIPHER_SIZE);
ctx->idx[choice]--;
}
if((ret = pthread_mutex_unlock(&ctx->mutex)) != 0) return ret;
return 0;
}
|
infogain_openmp.c | // information gain C-Adaptation
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <errno.h>
#include <sys/time.h>
#include <time.h>
#include <omp.h>
#include "common.h"
#include "cpmidx_double.h"
#include "blaslapack.h"
#include "randnumber.h"
#include "outputtiming.h"
#include "calculatenorm.h"
#include "calculatemaxchange.h"
#include "getentropy.h"
#include "getinfogainforattribute.h"
#include "infogain_openmp.h"
/**
* InfoGain calculates the InformationGain and GainRatio of the matrix "data" according to the classification given by classColumn
*
* Parameter
* data double* IN, matrix to calculate IG and GR for, instance times attribute matrix, attributes stored column wise
* m int IN, first dimension of matrix data (rows)
* n int IN, second dimension of matrix data (columns)
* classColumn int* IN, array of class attributes for every entity (row) in data
* numDistClasses int IN, number of distinct classes for the instances in matrix "data"
* indexInfoGain int* OUT, array of index of attributes sorted descendingly wrt information gain
* infoGainSorted double* OUT, array of the according information gains of these attributes
* indexGainRatio int* OUT, arry of index of attributes sorted descendingly wrt gain ratio
* gainRatioSorted double* OUT, array of the according gain ratio of these attributes
*
*/
int infoGain_openmp(double * data, int m, int n, double * classColumn, int numDistClasses, idx_double * indexedInfoGainSorted, idx_double * indexedGainRatioSorted) {
//check number of supported cores
int numProcs = omp_get_num_procs();
int i; //index variable used in for-loops
int numInstances = m; //number of instances in matrix data
int numAttributes = n; //number of attributes in matrix data
double H = 0.0; //entropy
double * classvalueCount = (double*) malloc(sizeof(double) * numDistClasses * 2);
double * attributevalueCount = (double*) malloc(sizeof(double) * m * 2 * numProcs);
// get Entropy of the classification
H = getEntropy(classColumn, m, 1, classvalueCount, numDistClasses);
double * timing = (double*) malloc(sizeof(double) * numAttributes * 1); //timing in seconds
double splitinfo = 0.0;
struct timeval start, end;
#pragma omp parallel num_threads(numProcs)
{
#pragma omp for private(i, start, end, splitinfo)
for (i = 0; i < numAttributes; ++i) {
int whoami = omp_get_thread_num();
gettimeofday(&start, 0);
indexedInfoGainSorted[i].val = getInfoGainForAttribute(data + i * m, classColumn, numDistClasses, classvalueCount, numInstances, H);
//TODELETE
//printf("indexedInfoGainSorted[%d].val = %f\n", i, indexedInfoGainSorted[i].val);
indexedInfoGainSorted[i].idx = i;
//getEntropy(attributeColumn, noClassification, null) --- null since classvalueCount is not needed
//TODO allocate second variable "attributevalueCount" of size m times 2
splitinfo = getEntropy(data + i * m, m, 0, attributevalueCount + whoami * 2 * numProcs, m);
if (splitinfo != 0.0) {
indexedGainRatioSorted[i].val = indexedInfoGainSorted[i].val / splitinfo;
}
else {
indexedGainRatioSorted[i].val = 0.0;
}
indexedGainRatioSorted[i].idx = i;
gettimeofday(&end, 0);
// save timing in seconds
timing[i] = (double) ( ( 1.0 * end.tv_sec - start.tv_sec) + 1E-6 * (1.0 * (end.tv_usec - start.tv_usec)) );
}
}
//TODO sort infoGain and GainRatio and store new indices of those values in descending order
qsort((void*) indexedInfoGainSorted, numAttributes, sizeof(idx_double), cmpidx_double);
qsort((void*) indexedGainRatioSorted, numAttributes, sizeof(idx_double), cmpidx_double);
//free allocated memory
free(classvalueCount);
free(attributevalueCount);
free(timing);
// return 0 if everything worked fine, else return error code
return 0;
}
|
GB_binop__lor_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_bool
// A.*B function (eWiseMult): GB_AemultB__lor_bool
// A*D function (colscale): GB_AxD__lor_bool
// D*A function (rowscale): GB_DxB__lor_bool
// C+=B function (dense accum): GB_Cdense_accumB__lor_bool
// C+=b function (dense accum): GB_Cdense_accumb__lor_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_bool
// C=scalar+B GB_bind1st__lor_bool
// C=scalar+B' GB_bind1st_tran__lor_bool
// C=A+scalar GB_bind2nd__lor_bool
// C=A'+scalar GB_bind2nd_tran__lor_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij || bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x || y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_BOOL || GxB_NO_LOR_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__lor_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__lor_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__lor_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__lor_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__lor_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__lor_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__lor_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__lor_bool
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool bij = Bx [p] ;
Cx [p] = (x || bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__lor_bool
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
Cx [p] = (aij || y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x || aij) ; \
}
GrB_Info GB_bind1st_tran__lor_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij || y) ; \
}
GrB_Info GB_bind2nd_tran__lor_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Main.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
int main( int argc, char* argv[] )
{
// =====================================================================
// Initialization & Command Line Read-In
// =====================================================================
int version = 14;
int mype = 0;
int max_procs = omp_get_num_procs();
int i, thread, mat;
unsigned long seed;
double omp_start, omp_end, p_energy;
unsigned long long vhash = 0;
int nprocs;
#ifdef MPI
MPI_Status stat;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
// rand() is only used in the serial initialization stages.
// A custom RNG is used in parallel portions.
#ifdef VERIFICATION
srand(26);
#else
srand(time(NULL));
#endif
// Process CLI Fields -- store in "Inputs" structure
Inputs in = read_CLI( argc, argv );
// Set number of OpenMP Threads
omp_set_num_threads(in.nthreads);
// Print-out of Input Summary
if( mype == 0 )
print_inputs( in, nprocs, version );
// =====================================================================
// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
// =====================================================================
// Allocate & fill energy grids
#ifndef BINARY_READ
if( mype == 0) printf("Generating Nuclide Energy Grids...\n");
#endif
NuclideGridPoint ** nuclide_grids = gpmatrix(in.n_isotopes,in.n_gridpoints);
#ifdef VERIFICATION
generate_grids_v( nuclide_grids, in.n_isotopes, in.n_gridpoints );
#else
generate_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
#endif
// Sort grids by energy
#ifndef BINARY_READ
if( mype == 0) printf("Sorting Nuclide Energy Grids...\n");
sort_nuclide_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
#endif
// If using a unionized grid search, initialize the energy grid
// Otherwise, leave these as null
GridPoint * energy_grid = NULL;
int * index_data = NULL;
if( in.grid_type == UNIONIZED )
{
// Prepare Unionized Energy Grid Framework
#ifndef BINARY_READ
energy_grid = generate_energy_grid( in.n_isotopes,
in.n_gridpoints, nuclide_grids );
#else
energy_grid = (GridPoint *)malloc( in.n_isotopes *
in.n_gridpoints * sizeof( GridPoint ) );
index_data = (int *) malloc( in.n_isotopes * in.n_gridpoints
* in.n_isotopes * sizeof(int));
for( i = 0; i < in.n_isotopes*in.n_gridpoints; i++ )
energy_grid[i].xs_ptrs = &index_data[i*in.n_isotopes];
#endif
// Double Indexing. Filling in energy_grid with pointers to the
// nuclide_energy_grids.
#ifndef BINARY_READ
set_grid_ptrs( energy_grid, nuclide_grids, in.n_isotopes, in.n_gridpoints );
#endif
}
#ifdef BINARY_READ
if( mype == 0 ) printf("Reading data from \"XS_data.dat\" file...\n");
binary_read(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid, in.grid_type);
#endif
// Get material data
if( mype == 0 )
printf("Loading Mats...\n");
int *num_nucs = load_num_nucs(in.n_isotopes);
int **mats = load_mats(num_nucs, in.n_isotopes);
#ifdef VERIFICATION
double **concs = load_concs_v(num_nucs);
#else
double **concs = load_concs(num_nucs);
#endif
#ifdef BINARY_DUMP
if( mype == 0 ) printf("Dumping data to binary file...\n");
binary_dump(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid, in.grid_type);
if( mype == 0 ) printf("Binary file \"XS_data.dat\" written! Exiting...\n");
return 0;
#endif
// =====================================================================
// Cross Section (XS) Parallel Lookup Simulation Begins
// =====================================================================
// Outer benchmark loop can loop through all possible # of threads
#ifdef BENCHMARK
for( int bench_n = 1; bench_n <=omp_get_num_procs(); bench_n++ )
{
in.nthreads = bench_n;
omp_set_num_threads(in.nthreads);
#endif
if( mype == 0 )
{
printf("\n");
border_print();
center_print("SIMULATION", 79);
border_print();
}
omp_start = omp_get_wtime();
//initialize papi with one thread (master) here
#ifdef PAPI
if ( PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT){
fprintf(stderr, "PAPI library init error!\n");
exit(1);
}
#endif
// OpenMP compiler directives - declaring variables as shared or private
#pragma omp parallel default(none) \
private(i, thread, p_energy, mat, seed) \
shared( max_procs, in, energy_grid, nuclide_grids, \
mats, concs, num_nucs, mype, vhash)
{
// Initialize parallel PAPI counters
#ifdef PAPI
int eventset = PAPI_NULL;
int num_papi_events;
#pragma omp critical
{
counter_init(&eventset, &num_papi_events);
}
#endif
double macro_xs_vector[5];
double * xs = (double *) calloc(5, sizeof(double));
// Initialize RNG seeds for threads
thread = omp_get_thread_num();
seed = (thread+1)*19+17;
// XS Lookup Loop
#pragma omp for schedule(dynamic)
for( i = 0; i < in.lookups; i++ )
{
// Status text
if( INFO && mype == 0 && thread == 0 && i % 1000 == 0 )
printf("\rCalculating XS's... (%.0lf%% completed)",
(i / ( (double)in.lookups / (double) in.nthreads ))
/ (double) in.nthreads * 100.0);
// Randomly pick an energy and material for the particle
#ifdef VERIFICATION
#pragma omp critical
{
p_energy = rn_v();
mat = pick_mat(&seed);
}
#else
p_energy = rn(&seed);
mat = pick_mat(&seed);
#endif
// debugging
//printf("E = %lf mat = %d\n", p_energy, mat);
// This returns the macro_xs_vector, but we're not going
// to do anything with it in this program, so return value
// is written over.
calculate_macro_xs( p_energy, mat, in.n_isotopes,
in.n_gridpoints, num_nucs, concs,
energy_grid, nuclide_grids, mats,
macro_xs_vector, in.grid_type );
// Copy results from above function call onto heap
// so that compiler cannot optimize function out
// (only occurs if -flto flag is used)
memcpy(xs, macro_xs_vector, 5*sizeof(double));
// Verification hash calculation
// This method provides a consistent hash accross
// architectures and compilers.
#ifdef VERIFICATION
char line[256];
sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf",
p_energy, mat,
macro_xs_vector[0],
macro_xs_vector[1],
macro_xs_vector[2],
macro_xs_vector[3],
macro_xs_vector[4]);
unsigned long long vhash_local = hash(line, 10000);
#pragma omp atomic
vhash += vhash_local;
#endif
}
// Prints out thread local PAPI counters
#ifdef PAPI
if( mype == 0 && thread == 0 )
{
printf("\n");
border_print();
center_print("PAPI COUNTER RESULTS", 79);
border_print();
printf("Count \tSmybol \tDescription\n");
}
{
#pragma omp barrier
}
counter_stop(&eventset, num_papi_events);
#endif
}
#ifndef PAPI
if( mype == 0)
{
printf("\n" );
printf("Simulation complete.\n" );
}
#endif
omp_end = omp_get_wtime();
// Print / Save Results and Exit
print_results( in, mype, omp_end-omp_start, nprocs, vhash );
#ifdef BENCHMARK
}
#endif
#ifdef MPI
MPI_Finalize();
#endif
return 0;
}
|
task-two.c | /*
* task-two.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#define NUM_THREADS 2
int main(int argc, char *argv[]) {
int var = 0;
int i;
#pragma omp parallel for num_threads(NUM_THREADS) shared(var) schedule(static, \
1)
for (i = 0; i < NUM_THREADS; i++) {
#pragma omp task shared(var) if (0) // the task is inlined an executed locally
{ var++; }
}
int error = (var != 2);
fprintf(stderr, "DONE\n");
return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-two.c:30
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-two.c:30
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
for (t4=max(max(ceild(t1-12,16),ceild(4*t2-Nz-19,32)),ceild(24*t3-Ny-19,32));t4<=min(min(min(floord(4*Nt+Nx-9,32),floord(2*t1+Nx-3,32)),floord(4*t2+Nx-9,32)),floord(24*t3+Nx+11,32));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(32*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(32*t4,4*t5+4);
ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
cpu_adagrad.h | #pragma once
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <cassert>
#include "cuda.h"
#include "custom_cuda_layers.h"
#include "simd.h"
#define STEP(SPAN) \
void Step_##SPAN(float* _params, \
float* grads, \
float* _exp_avg_sq, \
size_t _param_size, \
__half* dev_param = nullptr, \
bool half_precision = false);
class Adagrad_Optimizer {
public:
Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0)
: _alpha(alpha), _eps(eps), _weight_decay(weight_decay), _buf_index(false)
{
cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
_streams[0] = Context::Instance().GetCurrentStream();
_streams[1] = Context::Instance().GetNewStream();
}
~Adagrad_Optimizer()
{
cudaFreeHost(_doubled_buffer[0]);
cudaFreeHost(_doubled_buffer[1]);
}
template <int span>
void Step_AVX(size_t* rounded_size,
float* _params,
float* grads,
float* _exp_avg_sq,
size_t param_size,
__half* dev_param = nullptr,
bool half_precision = false);
#if defined(__AVX512__) or defined(__AVX256__)
STEP(1)
STEP(4)
STEP(8)
#endif
inline void SynchronizeStreams()
{
for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
}
inline void IncrementStep(size_t step)
{
_step++;
if (_step != step) { _step = step; }
}
inline void update_state(float lr, float epsilon, float weight_decay)
{
_alpha = lr;
_eps = epsilon;
_weight_decay = weight_decay;
}
private:
float _alpha;
float _eps;
float _weight_decay;
float _betta1_t;
float _betta2_t;
size_t _step;
float* _doubled_buffer[2];
bool _buf_index;
cudaStream_t _streams[2];
};
#if defined(__AVX512__) or defined(__AVX256__)
template <int span>
void Adagrad_Optimizer::Step_AVX(size_t* rounded_size,
float* _params,
float* grads,
float* _exp_avg_sq,
size_t _param_size,
__half* dev_params,
bool half_precision)
{
size_t new_rounded_size = 0;
AVX_Data eps_4;
eps_4.data = SIMD_SET(_eps);
float step_size = -1 * _alpha;
AVX_Data step_size_4;
step_size_4.data = SIMD_SET(step_size);
AVX_Data weight_decay4;
if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay);
new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
for (size_t t = 0; t < new_rounded_size; t += TILE) {
size_t copy_size = TILE;
if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
size_t offset = copy_size + t;
if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
#pragma omp parallel for
for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
AVX_Data grad_4[span];
simd_load<span>(grad_4, grads + i, half_precision);
AVX_Data momentum_4[span];
simd_load<span>(momentum_4, grads + i, false);
AVX_Data variance_4[span];
simd_load<span>(variance_4, _exp_avg_sq + i, false);
AVX_Data param_4[span];
simd_load<span>(param_4, _params + i, half_precision);
if (_weight_decay > 0) { simd_fma<span>(grad_4, param_4, weight_decay4, grad_4); }
simd_fma<span>(variance_4, grad_4, grad_4, variance_4);
simd_sqrt<span>(grad_4, variance_4);
simd_add<span>(grad_4, grad_4, eps_4);
simd_div<span>(grad_4, momentum_4, grad_4);
simd_fma<span>(param_4, grad_4, step_size_4, param_4);
simd_store<span>(_params + i, param_4, half_precision);
if (dev_params) {
simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
}
simd_store<span>(_exp_avg_sq + i, variance_4, false);
}
if (dev_params) {
if (half_precision)
launch_param_update_half(
_doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
else
launch_param_update(
_doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
_buf_index = !_buf_index;
}
}
*rounded_size = new_rounded_size;
}
#endif
|
shortcut_layer.c | #include "shortcut_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include "gemm.h"
#include <stdio.h>
#include <assert.h>
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2, int assisted_excitation, ACTIVATION activation, int train)
{
if(assisted_excitation) fprintf(stderr, "Shortcut Layer - AE: %d\n", index);
else fprintf(stderr,"Shortcut Layer: %d\n", index);
layer l = { (LAYER_TYPE)0 };
l.train = train;
l.type = SHORTCUT;
l.batch = batch;
l.activation = activation;
l.w = w2;
l.h = h2;
l.c = c2;
l.out_w = w;
l.out_h = h;
l.out_c = c;
l.outputs = w*h*c;
l.inputs = l.outputs;
l.assisted_excitation = assisted_excitation;
if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2);
l.index = index;
if (train) l.delta = (float*)calloc(l.outputs * batch, sizeof(float));
l.output = (float*)calloc(l.outputs * batch, sizeof(float));
l.forward = forward_shortcut_layer;
l.backward = backward_shortcut_layer;
#ifndef GPU
if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)calloc(l.batch*l.outputs, sizeof(float));
#endif // GPU
#ifdef GPU
if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs);
l.forward_gpu = forward_shortcut_layer_gpu;
l.backward_gpu = backward_shortcut_layer_gpu;
if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
if (l.assisted_excitation)
{
const int size = l.out_w * l.out_h * l.batch;
l.gt_gpu = cuda_make_array(NULL, size);
l.a_avg_gpu = cuda_make_array(NULL, size);
}
#endif // GPU
return l;
}
void resize_shortcut_layer(layer *l, int w, int h)
{
//assert(l->w == l->out_w);
//assert(l->h == l->out_h);
l->w = l->out_w = w;
l->h = l->out_h = h;
l->outputs = w*h*l->out_c;
l->inputs = l->outputs;
if (l->train) l->delta = (float*)realloc(l->delta, l->outputs * l->batch * sizeof(float));
l->output = (float*)realloc(l->output, l->outputs * l->batch * sizeof(float));
#ifdef GPU
cuda_free(l->output_gpu);
l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
if (l->train) {
cuda_free(l->delta_gpu);
l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
}
#endif
}
void forward_shortcut_layer(const layer l, network_state state)
{
if (l.w == l.out_w && l.h == l.out_h && l.c == l.out_c) {
int size = l.batch * l.w * l.h * l.c;
int i;
#pragma omp parallel for
for(i = 0; i < size; ++i)
l.output[i] = state.input[i] + state.net.layers[l.index].output[i];
}
else {
copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
shortcut_cpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output);
}
//activate_array(l.output, l.outputs*l.batch, l.activation);
if (l.activation == SWISH) activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output);
else if (l.activation == MISH) activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output);
else activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation);
if (l.assisted_excitation && state.train) assisted_excitation_forward(l, state);
}
void backward_shortcut_layer(const layer l, network_state state)
{
if (l.activation == SWISH) gradient_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.delta);
else if (l.activation == MISH) gradient_array_mish(l.outputs*l.batch, l.activation_input, l.delta);
else gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
}
#ifdef GPU
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
//copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
//simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu);
//shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
if (l.assisted_excitation && state.train) assisted_excitation_forward_gpu(l, state);
}
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}
#endif
|
ejercicio7.c | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define PRINTF_ALL
#define VECTOR_DYNAMIC //descomentar para que los vectores sean variables ...
//dinΓ‘micas (memoria reautilizable durante la ejecuciΓ³n)
#ifdef VECTOR_GLOBAL
#define MAX 33554432
double matriz[MAX], matriz2[MAX], resultado[MAX];
#endif
int main(int argc, char** argv){
int i,j, temporal,k;
struct timespec cgt1,cgt2;
double ncgt; //para tiempo de ejecuciΓ³n
if(argc<3){
printf("Faltan nΒΊ componentes de las matrices <nΒΊ_filas_matriz_y_nΒΊ_columnas_matriz> o chunk\n");
exit(-1);
}
unsigned int N=atoi(argv[1]);
unsigned int chunk=atoi(argv[2]);
omp_set_schedule(N,chunk); //modificamos run-sched-var
int **matriz, *vector, *resultado;
//Reservamos espacio pa la matriz
//*******************************
matriz= (int**) malloc(N*sizeof(int*));
for(i=0;i<N;i++)
matriz[i]=(int *) malloc((N-i)*sizeof(int)); //escalonamos la matriz
//Reservamos memoria para los vectores
vector= (int*) malloc(N*sizeof(int));
resultado=(int *) malloc(N*sizeof(int));
//*******************************
if((matriz==NULL) || (vector==NULL) || (resultado==NULL)){
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
//Inicializar matrices
#pragma parallel for
for(i=0;i<N;i++){
for(j=0;j<N-i;j++){
matriz[i][j]= i*j;
}
}
//Inicializamos los vectores
#pragma parallel for
for(i=0;i<N;i++)
vector[i]=i+10;
#pragma parallel for
for(i=0;i<N;i++){
resultado[i]=0;
}
//***********************
clock_gettime(CLOCK_REALTIME,&cgt1);
//Calcular multiplicaciΓ³n de la matrices
//**************************************
#pragma omp parallel for firstprivate(temporal) lastprivate(temporal)schedule(guided,chunk)
for(i=0;i<N;i++){
resultado[i]=0;
#pragma omp parallel for reduction(+:temporal)
for(j=0;j<N-i;j++){
temporal+=matriz[i][j] * vector[i];
#pragma omp atomic
resultado[i]+=temporal;
}
}
//**************************************
clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec) + (double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9));
#ifdef PRINTF_ALL
printf("Tiempo(seg.): %11.9f\t / TamaΓ±o Vectores:%u\n",ncgt,N);
/*
for(i=0;i<N;i++){
for(j=0;j<N-i;j++)
printf("/matriz[%d][%d]*vector[%d](%d*%d=%d)/\n", i,j,i,matriz[i][j],vector[i],matriz[i][j] * vector[i]);
}
printf("Resultado final resultante:\n");
for(i=0;i<N;i++){
for(j=0;j<N-i;j++)
printf("resultado[%d]= %d\n", i,resultado[i]);
}
*/
#else
printf("Tiempo(seg.): %11.9f\t / TamaΓ±o Vectores:%u\n",
ncgt,N,matriz[0][0],vector[0],resultado[0],N-1,N-1,N-1,matriz[N-1][N-1],vector[N-1],resultado[N-1]);
#endif
#ifdef VECTOR_DYNAMIC
free(matriz); //libera el espacio reservado para v1
free(vector); //libera el espacio reservado para v2
free(resultado); //libera el espacio reservado para v3
#endif
return 0;
} |
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP clause.
class OMPClause {
/// Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// Ending location of the clause.
SourceLocation EndLoc;
/// Kind of the clause.
OpenMPClauseKind Kind;
protected:
OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
: StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}
public:
/// Returns the starting location of the clause.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns the ending location of the clause.
SourceLocation getEndLoc() const { return EndLoc; }
/// Sets the starting location of the clause.
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Sets the ending location of the clause.
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// Returns kind of OpenMP clause (private, shared, reduction, etc.).
OpenMPClauseKind getClauseKind() const { return Kind; }
bool isImplicit() const { return StartLoc.isInvalid(); }
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<OMPClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Get the iterator range for the expressions used in the clauses. Used
/// expressions include only the children that must be evaluated at the
/// runtime before entering the construct.
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'shedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Pre-initialization statement for the clause.
Stmt *PreInit = nullptr;
/// Region that captures the associated stmt.
OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;
protected:
OMPClauseWithPreInit(const OMPClause *This) {
assert(get(This) && "get is not tuned for pre-init.");
}
/// Set pre-initialization statement for the clause.
void
setPreInitStmt(Stmt *S,
OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
PreInit = S;
CaptureRegion = ThisRegion;
}
public:
/// Get pre-initialization statement for the clause.
const Stmt *getPreInitStmt() const { return PreInit; }
/// Get pre-initialization statement for the clause.
Stmt *getPreInitStmt() { return PreInit; }
/// Get capture region for the stmt in the clause.
OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }
static OMPClauseWithPreInit *get(OMPClause *C);
static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Post-update expression for the clause.
Expr *PostUpdate = nullptr;
protected:
OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
assert(get(This) && "get is not tuned for post-update.");
}
/// Set pre-initialization statement for the clause.
void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
public:
/// Get post-update expression for the clause.
const Expr *getPostUpdateExpr() const { return PostUpdate; }
/// Get post-update expression for the clause.
Expr *getPostUpdateExpr() { return PostUpdate; }
static OMPClauseWithPostUpdate *get(OMPClause *C);
static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This structure contains most locations needed for by an OMPVarListClause.
struct OMPVarListLocTy {
/// Starting location of the clause (the clause keyword).
SourceLocation StartLoc;
/// Location of '('.
SourceLocation LParenLoc;
/// Ending location of the clause.
SourceLocation EndLoc;
OMPVarListLocTy() = default;
OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of variables in the list.
unsigned NumVars;
protected:
/// Build a clause with \a N variables
///
/// \param K Kind of the clause.
/// \param StartLoc Starting location of the clause (the clause keyword).
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
: OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}
/// Fetches list of variables associated with this clause.
MutableArrayRef<Expr *> getVarRefs() {
return MutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
}
/// Sets the list of variables for this clause.
void setVarRefs(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
std::copy(VL.begin(), VL.end(),
static_cast<T *>(this)->template getTrailingObjects<Expr *>());
}
public:
using varlist_iterator = MutableArrayRef<Expr *>::iterator;
using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
using varlist_range = llvm::iterator_range<varlist_iterator>;
using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVarRefs().begin(); }
varlist_iterator varlist_end() { return getVarRefs().end(); }
varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
return llvm::makeArrayRef(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
NumVars);
}
};
/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Expression with the allocator.
Stmt *Allocator = nullptr;
/// Set allocator.
void setAllocator(Expr *A) { Allocator = A; }
public:
/// Build 'allocator' clause with the given allocator.
///
/// \param A Allocator.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc),
LParenLoc(LParenLoc), Allocator(A) {}
/// Build an empty clause.
OMPAllocatorClause()
: OMPClause(llvm::omp::OMPC_allocator, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns allocator.
Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }
child_range children() { return child_range(&Allocator, &Allocator + 1); }
const_child_range children() const {
return const_child_range(&Allocator, &Allocator + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_allocator;
}
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
: public OMPVarListClause<OMPAllocateClause>,
private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Allocator specified in the clause, or 'nullptr' if the default one is
/// used.
Expr *Allocator = nullptr;
/// Position of the ':' delimiter in the clause;
SourceLocation ColonLoc;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Allocator Allocator expression.
/// \param ColonLoc Location of ':' delimiter.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
Expr *Allocator, SourceLocation ColonLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc,
LParenLoc, EndLoc, N),
Allocator(Allocator), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPAllocateClause(unsigned N)
: OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
void setAllocator(Expr *A) { Allocator = A; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Allocator Allocator expression.
/// \param ColonLoc Location of ':' delimiter.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, Expr *Allocator,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Returns the allocator expression or nullptr, if no allocator is specified.
Expr *getAllocator() const { return Allocator; }
/// Returns the location of the ':' delimiter.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPAllocateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_allocate;
}
};
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Condition of the 'if' clause.
Stmt *Condition = nullptr;
/// Location of ':' (if any).
SourceLocation ColonLoc;
/// Directive name modifier for the clause.
OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown;
/// Name modifier location.
SourceLocation NameModifierLoc;
/// Set condition.
void setCondition(Expr *Cond) { Condition = Cond; }
/// Set directive name modifier for the clause.
void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }
/// Set location of directive name modifier for the clause.
void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }
/// Set location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Build 'if' clause with condition \a Cond.
///
/// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
/// \param Cond Condition of the clause.
/// \param HelperCond Helper condition for the clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param NameModifierLoc Location of directive name modifier.
/// \param ColonLoc [OpenMP 4.1] Location of ':'.
/// \param EndLoc Ending location of the clause.
OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation NameModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond),
ColonLoc(ColonLoc), NameModifier(NameModifier),
NameModifierLoc(NameModifierLoc) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
OMPIfClause()
: OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
/// Return directive name modifier associated with the clause.
OpenMPDirectiveKind getNameModifier() const { return NameModifier; }
/// Return the location of directive name modifier.
SourceLocation getNameModifierLoc() const { return NameModifierLoc; }
child_range children() { return child_range(&Condition, &Condition + 1); }
const_child_range children() const {
return const_child_range(&Condition, &Condition + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPIfClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_if;
}
};
/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Condition of the 'if' clause.
Stmt *Condition = nullptr;
/// Set condition.
void setCondition(Expr *Cond) { Condition = Cond; }
public:
/// Build 'final' clause with condition \a Cond.
///
/// \param Cond Condition of the clause.
/// \param HelperCond Helper condition for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPFinalClause(Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
OMPFinalClause()
: OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns condition.
Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
child_range children() { return child_range(&Condition, &Condition + 1); }
const_child_range children() const {
return const_child_range(&Condition, &Condition + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPFinalClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_final;
}
};
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Condition of the 'num_threads' clause.
Stmt *NumThreads = nullptr;
/// Set condition.
void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }
public:
/// Build 'num_threads' clause with condition \a NumThreads.
///
/// \param NumThreads Number of threads for the construct.
/// \param HelperNumThreads Helper Number of threads for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
NumThreads(NumThreads) {
setPreInitStmt(HelperNumThreads, CaptureRegion);
}
/// Build an empty clause.
OMPNumThreadsClause()
: OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns number of threads.
Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
const_child_range children() const {
return const_child_range(&NumThreads, &NumThreads + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_num_threads;
}
};
/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Safe iteration space distance.
Stmt *Safelen = nullptr;
/// Set safelen.
void setSafelen(Expr *Len) { Safelen = Len; }
public:
/// Build 'safelen' clause.
///
/// \param Len Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
LParenLoc(LParenLoc), Safelen(Len) {}
/// Build an empty clause.
explicit OMPSafelenClause()
: OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return safe iteration space distance.
Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
child_range children() { return child_range(&Safelen, &Safelen + 1); }
const_child_range children() const {
return const_child_range(&Safelen, &Safelen + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_safelen;
}
};
/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Safe iteration space distance.
Stmt *Simdlen = nullptr;
/// Set simdlen.
void setSimdlen(Expr *Len) { Simdlen = Len; }
public:
/// Build 'simdlen' clause.
///
/// \param Len Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
LParenLoc(LParenLoc), Simdlen(Len) {}
/// Build an empty clause.
explicit OMPSimdlenClause()
: OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return safe iteration space distance.
Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }
child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
const_child_range children() const {
return const_child_range(&Simdlen, &Simdlen + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_simdlen;
}
};
/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of for-loops.
Stmt *NumForLoops = nullptr;
/// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// Build 'collapse' clause.
///
/// \param Num Expression associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc),
LParenLoc(LParenLoc), NumForLoops(Num) {}
/// Build an empty clause.
explicit OMPCollapseClause()
: OMPClause(llvm::omp::OMPC_collapse, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
const_child_range children() const {
return const_child_range(&NumForLoops, &NumForLoops + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_collapse;
}
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'default' clause.
llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clauses.
///
/// \param K Argument of clause.
void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; }
/// Set argument location.
///
/// \param KLoc Argument location.
void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// Build 'default' clause with argument \a A ('none' or 'shared').
///
/// \param A Argument of the clause ('none' or 'shared').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc),
LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPDefaultClause()
: OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) {
}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
llvm::omp::DefaultKind getDefaultKind() const { return Kind; }
/// Returns location of clause kind.
SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_default;
}
};
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'proc_bind' clause.
llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clause.
///
/// \param K Kind of clause.
void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; }
/// Set clause kind location.
///
/// \param KLoc Kind location.
void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// Build 'proc_bind' clause with argument \a A ('master', 'close' or
/// 'spread').
///
/// \param A Argument of the clause ('master', 'close' or 'spread').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc),
LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPProcBindClause()
: OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
llvm::omp::ProcBindKind getProcBindKind() const { return Kind; }
/// Returns location of clause kind.
SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_proc_bind;
}
};
/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'unified_address' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUnifiedAddressClause()
: OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(),
SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_unified_address;
}
};
/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'unified_shared_memory' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUnifiedSharedMemoryClause()
: OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(),
SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory;
}
};
/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'reverse_offload' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReverseOffloadClause()
: OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(),
SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_reverse_offload;
}
};
/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
friend class OMPClauseReader;
/// Build 'dynamic_allocators' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPDynamicAllocatorsClause()
: OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(),
SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators;
}
};
/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('
SourceLocation LParenLoc;
/// A kind of the 'atomic_default_mem_order' clause.
OpenMPAtomicDefaultMemOrderClauseKind Kind =
OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clause.
///
/// \param K Kind of clause.
void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
Kind = K;
}
/// Set clause kind location.
///
/// \param KLoc Kind location.
void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
KindKwLoc = KLoc;
}
public:
/// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
/// 'acq_rel' or 'relaxed').
///
/// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
SourceLocation ALoc, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPAtomicDefaultMemOrderClause()
: OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the locaiton of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
return Kind;
}
/// Returns location of clause kind.
SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
}
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'schedule' clause.
OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;
/// Modifiers for 'schedule' clause.
enum {FIRST, SECOND, NUM_MODIFIERS};
OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];
/// Locations of modifiers.
SourceLocation ModifiersLoc[NUM_MODIFIERS];
/// Start location of the schedule ind in source code.
SourceLocation KindLoc;
/// Location of ',' (if any).
SourceLocation CommaLoc;
/// Chunk size.
Expr *ChunkSize = nullptr;
/// Set schedule kind.
///
/// \param K Schedule kind.
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }
/// Set the first schedule modifier.
///
/// \param M Schedule modifier.
void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[FIRST] = M;
}
/// Set the second schedule modifier.
///
/// \param M Schedule modifier.
void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
Modifiers[SECOND] = M;
}
/// Set location of the first schedule modifier.
void setFirstScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[FIRST] = Loc;
}
/// Set location of the second schedule modifier.
void setSecondScheduleModifierLoc(SourceLocation Loc) {
ModifiersLoc[SECOND] = Loc;
}
/// Set schedule modifier location.
///
/// \param M Schedule modifier location.
void setScheduleModifer(OpenMPScheduleClauseModifier M) {
if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
Modifiers[FIRST] = M;
else {
assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
Modifiers[SECOND] = M;
}
}
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// Build 'schedule' clause with schedule kind \a Kind and chunk size
/// expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind Schedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
/// \param M1 The first modifier applied to 'schedule' clause.
/// \param M1Loc Location of the first modifier
/// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier
OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
Expr *ChunkSize, Stmt *HelperChunkSize,
OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
: OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
Modifiers[FIRST] = M1;
Modifiers[SECOND] = M2;
ModifiersLoc[FIRST] = M1Loc;
ModifiersLoc[SECOND] = M2Loc;
}
/// Build an empty clause.
explicit OMPScheduleClause()
: OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {
Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
}
/// Get kind of the clause.
OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }
/// Get the first modifier of the clause.
OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
return Modifiers[FIRST];
}
/// Get the second modifier of the clause.
OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
return Modifiers[SECOND];
}
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getScheduleKindLoc() { return KindLoc; }
/// Get the first modifier location.
SourceLocation getFirstScheduleModifierLoc() const {
return ModifiersLoc[FIRST];
}
/// Get the second modifier location.
SourceLocation getSecondScheduleModifierLoc() const {
return ModifiersLoc[SECOND];
}
/// Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPScheduleClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_schedule;
}
};
/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
: public OMPClause,
private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
friend class OMPClauseReader;
friend TrailingObjects;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of for-loops.
Stmt *NumForLoops = nullptr;
/// Real number of loops.
unsigned NumberOfLoops = 0;
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc),
LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {}
/// Build an empty clause.
explicit OMPOrderedClause(unsigned NumLoops)
: OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()),
NumberOfLoops(NumLoops) {}
/// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Build an empty clause.
static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
/// Set number of iterations for the specified loop.
void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
/// Get number of iterations for all the loops.
ArrayRef<Expr *> getLoopNumIterations() const;
/// Set loop counter for the specified loop.
void setLoopCounter(unsigned NumLoop, Expr *Counter);
/// Get loops counter for the specified loop.
Expr *getLoopCounter(unsigned NumLoop);
const Expr *getLoopCounter(unsigned NumLoop) const;
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
const_child_range children() const {
return const_child_range(&NumForLoops, &NumForLoops + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_ordered;
}
};
/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
/// Build 'nowait' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNowaitClause()
: OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_nowait;
}
};
/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
/// Build 'untied' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPUntiedClause()
: OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_untied;
}
};
/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
/// Build 'mergeable' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPMergeableClause()
: OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(),
SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_mergeable;
}
};
/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
/// Build 'read' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReadClause()
: OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_read;
}
};
/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
/// Build 'write' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPWriteClause()
: OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_write;
}
};
/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
/// Also, this class represents 'update' clause in '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) update(in)
/// \endcode
/// In this example directive '#pragma omp depobj' has 'update' clause with 'in'
/// dependence kind.
class OMPUpdateClause final
: public OMPClause,
private llvm::TrailingObjects<OMPUpdateClause, SourceLocation,
OpenMPDependClauseKind> {
friend class OMPClauseReader;
friend TrailingObjects;
/// true if extended version of the clause for 'depobj' directive.
bool IsExtended = false;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<SourceLocation>) const {
// 2 locations: for '(' and argument location.
return IsExtended ? 2 : 0;
}
/// Sets the the location of '(' in clause for 'depobj' directive.
void setLParenLoc(SourceLocation Loc) {
assert(IsExtended && "Expected extended clause.");
*getTrailingObjects<SourceLocation>() = Loc;
}
/// Sets the the location of '(' in clause for 'depobj' directive.
void setArgumentLoc(SourceLocation Loc) {
assert(IsExtended && "Expected extended clause.");
*std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
}
/// Sets the dependence kind for the clause for 'depobj' directive.
void setDependencyKind(OpenMPDependClauseKind DK) {
assert(IsExtended && "Expected extended clause.");
*getTrailingObjects<OpenMPDependClauseKind>() = DK;
}
/// Build 'update' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc,
bool IsExtended)
: OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc),
IsExtended(IsExtended) {}
/// Build an empty clause.
OMPUpdateClause(bool IsExtended)
: OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()),
IsExtended(IsExtended) {}
public:
/// Creates clause for 'atomic' directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Creates clause for 'depobj' directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ArgumentLoc Location of the argument.
/// \param DK Dependence kind.
/// \param EndLoc Ending location of the clause.
static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ArgumentLoc,
OpenMPDependClauseKind DK,
SourceLocation EndLoc);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param IsExtended true if extended clause for 'depobj' directive must be
/// created.
static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended);
/// Checks if the clause is the extended clauses for 'depobj' directive.
bool isExtended() const { return IsExtended; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// Gets the the location of '(' in clause for 'depobj' directive.
SourceLocation getLParenLoc() const {
assert(IsExtended && "Expected extended clause.");
return *getTrailingObjects<SourceLocation>();
}
/// Gets the the location of argument in clause for 'depobj' directive.
SourceLocation getArgumentLoc() const {
assert(IsExtended && "Expected extended clause.");
return *std::next(getTrailingObjects<SourceLocation>(), 1);
}
/// Gets the dependence kind in clause for 'depobj' directive.
OpenMPDependClauseKind getDependencyKind() const {
assert(IsExtended && "Expected extended clause.");
return *getTrailingObjects<OpenMPDependClauseKind>();
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_update;
}
};
/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
/// Build 'capture' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPCaptureClause()
: OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_capture;
}
};
/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
/// Build 'seq_cst' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSeqCstClause()
: OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_seq_cst;
}
};
/// This represents 'acq_rel' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acq_rel
/// \endcode
/// In this example directive '#pragma omp flush' has 'acq_rel' clause.
class OMPAcqRelClause final : public OMPClause {
public:
/// Build 'ack_rel' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPAcqRelClause()
: OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_acq_rel;
}
};
/// This represents 'acquire' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush acquire
/// \endcode
/// In this example directive '#pragma omp flush' has 'acquire' clause.
class OMPAcquireClause final : public OMPClause {
public:
/// Build 'acquire' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPAcquireClause()
: OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_acquire;
}
};
/// This represents 'release' clause in the '#pragma omp atomic|flush'
/// directives.
///
/// \code
/// #pragma omp flush release
/// \endcode
/// In this example directive '#pragma omp flush' has 'release' clause.
class OMPReleaseClause final : public OMPClause {
public:
/// Build 'release' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPReleaseClause()
: OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_release;
}
};
/// This represents 'relaxed' clause in the '#pragma omp atomic'
/// directives.
///
/// \code
/// #pragma omp atomic relaxed
/// \endcode
/// In this example directive '#pragma omp atomic' has 'relaxed' clause.
class OMPRelaxedClause final : public OMPClause {
public:
/// Build 'relaxed' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPRelaxedClause()
: OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_relaxed;
}
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
: public OMPVarListClause<OMPPrivateClause>,
private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPPrivateClause(unsigned N)
: OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PrivateVL List of references to private copies with initializers.
static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PrivateVL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPPrivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_private;
}
};
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
public OMPClauseWithPreInit,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto generated variables used for
/// initialization of a single array element. Used if firstprivate variable is
/// of array type.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL, Stmt *PreInit);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range used_children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_firstprivate;
}
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variables. Used in codegen for initialization of
// lastprivate copies.
// 2. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents private variables
// (for arrays, single array element).
// 3. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents original variables
// (for arrays, single array element).
// 4. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Optional lastprivate kind, e.g. 'conditional', if specified by user.
OpenMPLastprivateModifier LPKind;
/// Optional location of the lasptrivate kind, if specified by user.
SourceLocation LPKindLoc;
/// Optional colon location, if specified by user.
SourceLocation ColonLoc;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
unsigned N)
: OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent original variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign private copy of the variable to original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
/// Sets lastprivate kind.
void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; }
/// Sets location of the lastprivate kind.
void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; }
/// Sets colon symbol location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// private variables (for arrays, single array element).
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// original variables (for arrays, single array element).
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
/// \param LPKind Lastprivate kind, e.g. 'conditional'.
/// \param LPKindLoc Location of the lastprivate kind.
/// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc,
SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Lastprivate kind.
OpenMPLastprivateModifier getKind() const { return LPKind; }
/// Returns the location of the lastprivate kind.
SourceLocation getKindLoc() const { return LPKindLoc; }
/// Returns the location of the ':' symbol, if any.
SourceLocation getColonLoc() const { return ColonLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLastprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_lastprivate;
}
};
/// This represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
: public OMPVarListClause<OMPSharedClause>,
private llvm::TrailingObjects<OMPSharedClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPSharedClause(unsigned N)
: OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPSharedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_shared;
}
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Reduction modifier.
OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown;
/// Reduction modifier location.
SourceLocation ModifierLoc;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc,
OpenMPReductionClauseModifier Modifier, unsigned N,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPReductionClause(unsigned N)
: OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction,
SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets reduction modifier.
void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; }
/// Sets location of the modifier.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private copy of the reduction
/// variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent LHS expression in the final
/// reduction expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent RHS expression in the final
/// reduction expression performed by the reduction clause.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper copy operations for inscan reductions.
/// The form is: Temps[i] = LHS[i];
void setInscanCopyOps(ArrayRef<Expr *> Ops);
/// Get the list of helper inscan copy operations.
MutableArrayRef<Expr *> getInscanCopyOps() {
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getInscanCopyOps() const {
return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
}
/// Set list of helper temp vars for inscan copy array operations.
void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps);
/// Get the list of helper inscan copy temps.
MutableArrayRef<Expr *> getInscanCopyArrayTemps() {
return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
}
ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
}
/// Set list of helper temp elements vars for inscan copy array operations.
void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems);
/// Get the list of helper inscan copy temps.
MutableArrayRef<Expr *> getInscanCopyArrayElems() {
return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(),
varlist_size());
}
ArrayRef<const Expr *> getInscanCopyArrayElems() const {
return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CutomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param CopyOps List of copy operations for inscan reductions:
/// \code
/// TempExprs = LHSExprs;
/// \endcode
/// \param CopyArrayTemps Temp arrays for prefix sums.
/// \param CopyArrayElems Temp arrays for prefix sums.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier,
ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps,
ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param Modifier Reduction modifier.
static OMPReductionClause *
CreateEmpty(const ASTContext &C, unsigned N,
OpenMPReductionClauseModifier Modifier);
/// Returns modifier.
OpenMPReductionClauseModifier getModifier() const { return Modifier; }
/// Returns modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_const_range copy_ops() const {
return helper_expr_const_range(getInscanCopyOps().begin(),
getInscanCopyOps().end());
}
helper_expr_range copy_ops() {
return helper_expr_range(getInscanCopyOps().begin(),
getInscanCopyOps().end());
}
helper_expr_const_range copy_array_temps() const {
return helper_expr_const_range(getInscanCopyArrayTemps().begin(),
getInscanCopyArrayTemps().end());
}
helper_expr_range copy_array_temps() {
return helper_expr_range(getInscanCopyArrayTemps().begin(),
getInscanCopyArrayTemps().end());
}
helper_expr_const_range copy_array_elems() const {
return helper_expr_const_range(getInscanCopyArrayElems().begin(),
getInscanCopyArrayElems().end());
}
helper_expr_range copy_array_elems() {
return helper_expr_range(getInscanCopyArrayElems().begin(),
getInscanCopyArrayElems().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range used_children() const {
auto Children = const_cast<OMPReductionClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_reduction;
}
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
: public OMPVarListClause<OMPTaskReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPTaskReductionClause>(
llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPTaskReductionClause(unsigned N)
: OMPVarListClause<OMPTaskReductionClause>(
llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent private copy of the reduction variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent LHS expression in the final reduction
/// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent RHS expression in the final reduction
/// expression performed by the reduction clause. Also, variables in these
/// expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CutomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPTaskReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_task_reduction;
}
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
: public OMPVarListClause<OMPInReductionClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Nested name specifier for C++.
NestedNameSpecifierLoc QualifierLoc;
/// Name of custom operator.
DeclarationNameInfo NameInfo;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param ColonLoc Location of ':'.
/// \param N Number of the variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned N, NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo)
: OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction,
StartLoc, LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPInReductionClause(unsigned N)
: OMPVarListClause<OMPInReductionClause>(
llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
/// Sets the name info for specified reduction identifier.
void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }
/// Sets the nested name specifier.
void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent private copy of the reduction variable.
void setPrivates(ArrayRef<Expr *> Privates);
/// Get the list of helper privates.
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent LHS expression in the final reduction
/// expression performed by the reduction clause.
void setLHSExprs(ArrayRef<Expr *> LHSExprs);
/// Get the list of helper LHS expressions.
MutableArrayRef<Expr *> getLHSExprs() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
/// These expressions represent RHS expression in the final reduction
/// expression performed by the reduction clause. Also, variables in these
/// expressions are used for proper initialization of reduction copies.
void setRHSExprs(ArrayRef<Expr *> RHSExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getRHSExprs() {
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
/// codegen of the clause. These expressions are binary expressions or
/// operator/custom reduction call that calculates new value from source
/// helper expressions to destination helper expressions.
void setReductionOps(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction expressions.
MutableArrayRef<Expr *> getReductionOps() {
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper reduction taskgroup descriptors.
void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);
/// Get the list of helper reduction taskgroup descriptors.
MutableArrayRef<Expr *> getTaskgroupDescriptors() {
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getTaskgroupDescriptors() const {
return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL The variables in the clause.
/// \param QualifierLoc The nested-name qualifier with location information
/// \param NameInfo The full name info for reduction identifier.
/// \param Privates List of helper expressions for proper generation of
/// private copies.
/// \param LHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// LHSs of the reduction expressions.
/// \param RHSExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// RHSs of the reduction expressions.
/// Also, variables in these expressions are used for proper initialization of
/// reduction copies.
/// \param ReductionOps List of helper expressions that represents reduction
/// expressions:
/// \code
/// LHSExprs binop RHSExprs;
/// operator binop(LHSExpr, RHSExpr);
/// <CutomReduction>(LHSExpr, RHSExpr);
/// \endcode
/// Required for proper codegen of final reduction operation performed by the
/// reduction clause.
/// \param TaskgroupDescriptors List of helper taskgroup descriptors for
/// corresponding items in parent taskgroup task_reduction clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPInReductionClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets location of ':' symbol in clause.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Gets the name info for specified reduction identifier.
const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
/// Gets the nested name specifier.
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range privates() const {
return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_range privates() {
return helper_expr_range(getPrivates().begin(), getPrivates().end());
}
helper_expr_const_range lhs_exprs() const {
return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_range lhs_exprs() {
return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
}
helper_expr_const_range rhs_exprs() const {
return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_range rhs_exprs() {
return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
}
helper_expr_const_range reduction_ops() const {
return helper_expr_const_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_range reduction_ops() {
return helper_expr_range(getReductionOps().begin(),
getReductionOps().end());
}
helper_expr_const_range taskgroup_descriptors() const {
return helper_expr_const_range(getTaskgroupDescriptors().begin(),
getTaskgroupDescriptors().end());
}
helper_expr_range taskgroup_descriptors() {
return helper_expr_range(getTaskgroupDescriptors().begin(),
getTaskgroupDescriptors().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPInReductionClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_in_reduction;
}
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;
/// Location of linear modifier if any.
SourceLocation ModifierLoc;
/// Location of ':'.
SourceLocation ColonLoc;
/// Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
/// Sets the expression to calculate linear step for clause.
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
/// Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
LParenLoc, EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear,
SourceLocation(), SourceLocation(),
SourceLocation(), NumVars),
OMPClauseWithPostUpdate(this) {}
/// Gets the list of initial values for linear variables.
///
/// There are NumVars expressions with initial values allocated after the
/// varlist, they are followed by NumVars update expressions (used to update
/// the linear variable's value on current iteration) and they are followed by
/// NumVars final expressions (used to calculate the linear variable's
/// value after the loop body). After these lists, there are 2 helper
/// expressions - linear step and a helper to calculate it before the
/// loop body (used when the linear step is not constant):
///
/// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
/// Finals[]; Step; CalcStep; }
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Sets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
return llvm::makeArrayRef(getInits().end(), varlist_size());
}
/// Sets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// Gets the list of used expressions for linear variables.
MutableArrayRef<Expr *> getUsedExprs() {
return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
}
ArrayRef<const Expr *> getUsedExprs() const {
return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
}
/// Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);
/// Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);
public:
/// Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
/// Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }
/// Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
/// Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);
/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);
/// Sets the list of used expressions for the linear clause.
void setUsedExprs(ArrayRef<Expr *> UE);
using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;
privates_range privates() {
return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
return privates_const_range(getPrivates().begin(), getPrivates().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;
updates_range updates() {
return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
return updates_const_range(getUpdates().begin(), getUpdates().end());
}
using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;
finals_range finals() {
return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
return finals_const_range(getFinals().begin(), getFinals().end());
}
using used_expressions_iterator = MutableArrayRef<Expr *>::iterator;
using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator;
using used_expressions_range =
llvm::iterator_range<used_expressions_iterator>;
using used_expressions_const_range =
llvm::iterator_range<used_expressions_const_iterator>;
used_expressions_range used_expressions() {
return finals_range(getUsedExprs().begin(), getUsedExprs().end());
}
used_expressions_const_range used_expressions() const {
return finals_const_range(getUsedExprs().begin(), getUsedExprs().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLinearClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPLinearClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_linear;
}
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
: public OMPVarListClause<OMPAlignedClause>,
private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Sets the alignment for clause.
void setAlignment(Expr *A) { *varlist_end() = A; }
/// Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc,
LParenLoc, EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned,
SourceLocation(), SourceLocation(),
SourceLocation(), NumVars) {}
public:
/// Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
Expr *A);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns alignment.
Expr *getAlignment() { return *varlist_end(); }
/// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPAlignedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_aligned;
}
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
: public OMPVarListClause<OMPCopyinClause>,
private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of master's thread values of
// threadprivate variables to local instances of that variables in other
// implicit threads.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
: OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyinClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_copyin;
}
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate,
StartLoc, LParenLoc, EndLoc, N) {
}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_copyprivate;
}
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFlushClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_flush;
}
};
/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// depobj' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Chunk size.
Expr *Depobj = nullptr;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
LParenLoc(LParenLoc) {}
/// Build an empty clause.
///
explicit OMPDepobjClause()
: OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {}
void setDepobj(Expr *E) { Depobj = E; }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
public:
/// Creates clause.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Depobj depobj expression associated with the 'depobj' directive.
static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, Expr *Depobj);
/// Creates an empty clause.
///
/// \param C AST context.
static OMPDepobjClause *CreateEmpty(const ASTContext &C);
/// Returns depobj expression associated with the clause.
Expr *getDepobj() { return Depobj; }
const Expr *getDepobj() const { return Depobj; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&Depobj),
reinterpret_cast<Stmt **>(&Depobj) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPDepobjClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_depobj;
}
};
/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
/// Dependency type location.
SourceLocation DepLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc,
LParenLoc, EndLoc, N),
NumLoops(NumLoops) {}
/// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend,
SourceLocation(), SourceLocation(),
SourceLocation(), N),
NumLoops(NumLoops) {}
/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// Set dependency kind and its location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Sets optional dependency modifier.
void setModifier(Expr *DepModifier);
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, Expr *DepModifier,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops);
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// Return optional depend modifier.
Expr *getModifier();
const Expr *getModifier() const {
return const_cast<OMPDependClause *>(this)->getModifier();
}
/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);
/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPDependClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_depend;
}
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Device clause modifier.
OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;
/// Location of the modifier.
SourceLocation ModifierLoc;
/// Device number.
Stmt *Device = nullptr;
/// Set the device number.
///
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
/// Sets modifier.
void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }
/// Setst modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
public:
/// Build 'device' clause.
///
/// \param Modifier Clause modifier.
/// \param E Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param ModifierLoc Modifier location.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ModifierLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
ModifierLoc(ModifierLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPDeviceClause()
: OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }
/// Return device number.
Expr *getDevice() const { return cast<Expr>(Device); }
/// Gets modifier.
OpenMPDeviceClauseModifier getModifier() const { return Modifier; }
/// Gets modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
child_range children() { return child_range(&Device, &Device + 1); }
const_child_range children() const {
return const_child_range(&Device, &Device + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_device;
}
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
/// Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPThreadsClause()
: OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_threads;
}
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
/// Build 'simd' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSIMDClause()
: OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_simd;
}
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
/// Class that represents a component of a mappable expression. E.g.
/// for an expression S.a, the first component is a declaration reference
/// expression associated with 'S' and the second is a member expression
/// associated with the field declaration 'a'. If the expression is an array
/// subscript it may not have any associated declaration. In that case the
/// associated declaration is set to nullptr.
class MappableComponent {
/// Pair of Expression and Non-contiguous pair associated with the
/// component.
llvm::PointerIntPair<Expr *, 1, bool> AssociatedExpressionNonContiguousPr;
/// Declaration associated with the declaration. If the component does
/// not have a declaration (e.g. array subscripts or section), this is set
/// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;
public:
explicit MappableComponent() = default;
explicit MappableComponent(Expr *AssociatedExpression,
ValueDecl *AssociatedDeclaration,
bool IsNonContiguous)
: AssociatedExpressionNonContiguousPr(AssociatedExpression,
IsNonContiguous),
AssociatedDeclaration(
AssociatedDeclaration
? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
: nullptr) {}
Expr *getAssociatedExpression() const {
return AssociatedExpressionNonContiguousPr.getPointer();
}
bool isNonContiguous() const {
return AssociatedExpressionNonContiguousPr.getInt();
}
ValueDecl *getAssociatedDeclaration() const {
return AssociatedDeclaration;
}
};
// List of components of an expression. This first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;
// List of all component lists associated to the same base declaration.
// E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
// their component list but the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;
protected:
// Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);
// Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// This structure contains all sizes needed for by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
/// Number of expressions listed.
unsigned NumVars;
/// Number of unique base declarations.
unsigned NumUniqueDeclarations;
/// Number of component lists.
unsigned NumComponentLists;
/// Total number of expression components.
unsigned NumComponents;
OMPMappableExprListSizeTy() = default;
OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
public OMPClauseMappableExprCommon {
friend class OMPClauseReader;
/// Number of unique declarations in this clause.
unsigned NumUniqueDeclarations;
/// Number of component lists in this clause.
unsigned NumComponentLists;
/// Total number of components in this clause.
unsigned NumComponents;
/// Whether this clause is possible to have user-defined mappers associated.
/// It should be true for map, to, and from clauses, and false for
/// use_device_ptr and is_device_ptr.
const bool SupportsMapper;
/// C++ nested name specifier for the associated user-defined mapper.
NestedNameSpecifierLoc MapperQualifierLoc;
/// The associated user-defined mapper identifier information.
DeclarationNameInfo MapperIdInfo;
protected:
/// Build a clause for \a NumUniqueDeclarations declarations, \a
/// NumComponentLists total component lists, and \a NumComponents total
/// components.
///
/// \param K Kind of the clause.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
/// \param SupportsMapper Indicates whether this clause is possible to have
/// user-defined mappers associated.
/// \param MapperQualifierLocPtr C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
OMPMappableExprListClause(
OpenMPClauseKind K, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false,
NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
DeclarationNameInfo *MapperIdInfoPtr = nullptr)
: OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
Sizes.NumVars),
NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
NumComponentLists(Sizes.NumComponentLists),
NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) {
if (MapperQualifierLocPtr)
MapperQualifierLoc = *MapperQualifierLocPtr;
if (MapperIdInfoPtr)
MapperIdInfo = *MapperIdInfoPtr;
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
return MutableArrayRef<ValueDecl *>(
static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Get the unique declarations that are in the trailing objects of the
/// class.
ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
return ArrayRef<ValueDecl *>(
static_cast<const T *>(this)
->template getTrailingObjects<ValueDecl *>(),
NumUniqueDeclarations);
}
/// Set the unique declarations that are in the trailing objects of the
/// class.
void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
assert(UDs.size() == NumUniqueDeclarations &&
"Unexpected amount of unique declarations.");
std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
MutableArrayRef<unsigned> getDeclNumListsRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Get the number of lists per declaration that are in the trailing
/// objects of the class.
ArrayRef<unsigned> getDeclNumListsRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
NumUniqueDeclarations);
}
/// Set the number of lists per declaration that are in the trailing
/// objects of the class.
void setDeclNumLists(ArrayRef<unsigned> DNLs) {
assert(DNLs.size() == NumUniqueDeclarations &&
"Unexpected amount of list numbers.");
std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
MutableArrayRef<unsigned> getComponentListSizesRef() {
return MutableArrayRef<unsigned>(
static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Get the cumulative component lists sizes that are in the trailing
/// objects of the class. They are appended after the number of lists.
ArrayRef<unsigned> getComponentListSizesRef() const {
return ArrayRef<unsigned>(
static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
NumUniqueDeclarations,
NumComponentLists);
}
/// Set the cumulative component lists sizes that are in the trailing
/// objects of the class.
void setComponentListSizes(ArrayRef<unsigned> CLSs) {
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of component lists.");
std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
}
/// Get the components that are in the trailing objects of the class.
MutableArrayRef<MappableComponent> getComponentsRef() {
return MutableArrayRef<MappableComponent>(
static_cast<T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Get the components that are in the trailing objects of the class.
ArrayRef<MappableComponent> getComponentsRef() const {
return ArrayRef<MappableComponent>(
static_cast<const T *>(this)
->template getTrailingObjects<MappableComponent>(),
NumComponents);
}
/// Set the components that are in the trailing objects of the class.
/// This requires the list sizes so that it can also fill the original
/// expressions, which are the first component of each list.
void setComponents(ArrayRef<MappableComponent> Components,
ArrayRef<unsigned> CLSs) {
assert(Components.size() == NumComponents &&
"Unexpected amount of component lists.");
assert(CLSs.size() == NumComponentLists &&
"Unexpected amount of list sizes.");
std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
}
/// Fill the clause information from the list of declarations and
/// associated component lists.
void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists) {
// Perform some checks to make sure the data sizes are consistent with the
// information available when the clause was created.
assert(getUniqueDeclarationsTotalNumber(Declarations) ==
NumUniqueDeclarations &&
"Unexpected number of mappable expression info entries!");
assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
"Unexpected total number of components!");
assert(Declarations.size() == ComponentLists.size() &&
"Declaration and component lists size is not consistent!");
assert(Declarations.size() == NumComponentLists &&
"Unexpected declaration and component lists size!");
// Organize the components by declaration and retrieve the original
// expression. Original expressions are always the first component of the
// mappable component list.
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
ComponentListMap;
{
auto CI = ComponentLists.begin();
for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
++DI, ++CI) {
assert(!CI->empty() && "Invalid component list!");
ComponentListMap[*DI].push_back(*CI);
}
}
// Iterators of the target storage.
auto UniqueDeclarations = getUniqueDeclsRef();
auto UDI = UniqueDeclarations.begin();
auto DeclNumLists = getDeclNumListsRef();
auto DNLI = DeclNumLists.begin();
auto ComponentListSizes = getComponentListSizesRef();
auto CLSI = ComponentListSizes.begin();
auto Components = getComponentsRef();
auto CI = Components.begin();
// Variable to compute the accumulation of the number of components.
unsigned PrevSize = 0u;
// Scan all the declarations and associated component lists.
for (auto &M : ComponentListMap) {
// The declaration.
auto *D = M.first;
// The component lists.
auto CL = M.second;
// Initialize the entry.
*UDI = D;
++UDI;
*DNLI = CL.size();
++DNLI;
// Obtain the cumulative sizes and concatenate all the components in the
// reserved storage.
for (auto C : CL) {
// Accumulate with the previous size.
PrevSize += C.size();
// Save the size.
*CLSI = PrevSize;
++CLSI;
// Append components after the current components iterator.
CI = std::copy(C.begin(), C.end(), CI);
}
}
}
/// Set the nested name specifier of associated user-defined mapper.
void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
MapperQualifierLoc = NNSL;
}
/// Set the name of associated user-defined mapper.
void setMapperIdInfo(DeclarationNameInfo MapperId) {
MapperIdInfo = MapperId;
}
/// Get the user-defined mapper references that are in the trailing objects of
/// the class.
MutableArrayRef<Expr *> getUDMapperRefs() {
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
return llvm::makeMutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Get the user-defined mappers references that are in the trailing objects
/// of the class.
ArrayRef<Expr *> getUDMapperRefs() const {
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
return llvm::makeArrayRef<Expr *>(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
}
/// Set the user-defined mappers that are in the trailing objects of the
/// class.
void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
"Unexpected number of user-defined mappers.");
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
}
public:
/// Return the number of unique base declarations in this clause.
unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
/// Return the number of lists derived from the clause expressions.
unsigned getTotalComponentListNum() const { return NumComponentLists; }
/// Return the total number of components in all lists derived from the
/// clause.
unsigned getTotalComponentsNum() const { return NumComponents; }
/// Gets the nested name specifier for associated user-defined mapper.
NestedNameSpecifierLoc getMapperQualifierLoc() const {
return MapperQualifierLoc;
}
/// Gets the name info for associated user-defined mapper.
const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }
/// Iterator that browse the components by lists. It also allows
/// browsing components of a single declaration.
class const_component_lists_iterator
: public llvm::iterator_adaptor_base<
const_component_lists_iterator,
MappableExprComponentListRef::const_iterator,
std::forward_iterator_tag, MappableComponent, ptrdiff_t,
MappableComponent, MappableComponent> {
// The declaration the iterator currently refers to.
ArrayRef<ValueDecl *>::iterator DeclCur;
// The list number associated with the current declaration.
ArrayRef<unsigned>::iterator NumListsCur;
// Whether this clause is possible to have user-defined mappers associated.
const bool SupportsMapper;
// The user-defined mapper associated with the current declaration.
ArrayRef<Expr *>::iterator MapperCur;
// Remaining lists for the current declaration.
unsigned RemainingLists = 0;
// The cumulative size of the previous list, or zero if there is no previous
// list.
unsigned PrevListSize = 0;
// The cumulative sizes of the current list - it will delimit the remaining
// range of interest.
ArrayRef<unsigned>::const_iterator ListSizeCur;
ArrayRef<unsigned>::const_iterator ListSizeEnd;
// Iterator to the end of the components storage.
MappableExprComponentListRef::const_iterator End;
public:
/// Construct an iterator that scans all lists.
explicit const_component_lists_iterator(
ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components, bool SupportsMapper,
ArrayRef<Expr *> Mappers)
: const_component_lists_iterator::iterator_adaptor_base(
Components.begin()),
DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
SupportsMapper(SupportsMapper),
ListSizeCur(CumulativeListSizes.begin()),
ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
assert(UniqueDecls.size() == DeclsListNum.size() &&
"Inconsistent number of declarations and list sizes!");
if (!DeclsListNum.empty())
RemainingLists = *NumListsCur;
if (SupportsMapper)
MapperCur = Mappers.begin();
}
/// Construct an iterator that scan lists for a given declaration \a
/// Declaration.
explicit const_component_lists_iterator(
const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
MappableExprComponentListRef Components, bool SupportsMapper,
ArrayRef<Expr *> Mappers)
: const_component_lists_iterator(UniqueDecls, DeclsListNum,
CumulativeListSizes, Components,
SupportsMapper, Mappers) {
// Look for the desired declaration. While we are looking for it, we
// update the state so that we know the component where a given list
// starts.
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
if (*DeclCur == Declaration)
break;
assert(*NumListsCur > 0 && "No lists associated with declaration??");
// Skip the lists associated with the current declaration, but save the
// last list size that was skipped.
std::advance(ListSizeCur, *NumListsCur - 1);
PrevListSize = *ListSizeCur;
++ListSizeCur;
if (SupportsMapper)
++MapperCur;
}
// If we didn't find any declaration, advance the iterator to after the
// last component and set remaining lists to zero.
if (ListSizeCur == CumulativeListSizes.end()) {
this->I = End;
RemainingLists = 0u;
return;
}
// Set the remaining lists with the total number of lists of the current
// declaration.
RemainingLists = *NumListsCur;
// Adjust the list size end iterator to the end of the relevant range.
ListSizeEnd = ListSizeCur;
std::advance(ListSizeEnd, RemainingLists);
// Given that the list sizes are cumulative, the index of the component
// that start the list is the size of the previous list.
std::advance(this->I, PrevListSize);
}
// Return the array with the current list. The sizes are cumulative, so the
// array size is the difference between the current size and previous one.
std::tuple<const ValueDecl *, MappableExprComponentListRef,
const ValueDecl *>
operator*() const {
assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
const ValueDecl *Mapper = nullptr;
if (SupportsMapper && *MapperCur)
Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl());
return std::make_tuple(
*DeclCur,
MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize),
Mapper);
}
std::tuple<const ValueDecl *, MappableExprComponentListRef,
const ValueDecl *>
operator->() const {
return **this;
}
// Skip the components of the current list.
const_component_lists_iterator &operator++() {
assert(ListSizeCur != ListSizeEnd && RemainingLists &&
"Invalid iterator!");
// If we don't have more lists just skip all the components. Otherwise,
// advance the iterator by the number of components in the current list.
if (std::next(ListSizeCur) == ListSizeEnd) {
this->I = End;
RemainingLists = 0;
} else {
std::advance(this->I, *ListSizeCur - PrevListSize);
PrevListSize = *ListSizeCur;
// We are done with a declaration, move to the next one.
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
if (SupportsMapper)
++MapperCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
return *this;
}
};
using const_component_lists_range =
llvm::iterator_range<const_component_lists_iterator>;
/// Iterators for all component lists.
const_component_lists_iterator component_lists_begin() const {
return const_component_lists_iterator(
getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
getComponentsRef(), SupportsMapper,
SupportsMapper ? getUDMapperRefs() : llvm::None);
}
const_component_lists_iterator component_lists_end() const {
return const_component_lists_iterator(
ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
MappableExprComponentListRef(getComponentsRef().end(),
getComponentsRef().end()),
SupportsMapper, llvm::None);
}
const_component_lists_range component_lists() const {
return {component_lists_begin(), component_lists_end()};
}
/// Iterators for component lists associated with the provided
/// declaration.
const_component_lists_iterator
decl_component_lists_begin(const ValueDecl *VD) const {
return const_component_lists_iterator(
VD, getUniqueDeclsRef(), getDeclNumListsRef(),
getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
SupportsMapper ? getUDMapperRefs() : llvm::None);
}
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
}
const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
return {decl_component_lists_begin(VD), decl_component_lists_end()};
}
/// Iterators to access all the declarations, number of lists, list sizes, and
/// components.
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;
const_all_decls_range all_decls() const {
auto A = getUniqueDeclsRef();
return const_all_decls_range(A.begin(), A.end());
}
using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
using const_all_num_lists_range =
llvm::iterator_range<const_all_num_lists_iterator>;
const_all_num_lists_range all_num_lists() const {
auto A = getDeclNumListsRef();
return const_all_num_lists_range(A.begin(), A.end());
}
using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
using const_all_lists_sizes_range =
llvm::iterator_range<const_all_lists_sizes_iterator>;
const_all_lists_sizes_range all_lists_sizes() const {
auto A = getComponentListSizesRef();
return const_all_lists_sizes_range(A.begin(), A.end());
}
using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
using const_all_components_range =
llvm::iterator_range<const_all_components_iterator>;
const_all_components_range all_components() const {
auto A = getComponentsRef();
return const_all_components_range(A.begin(), A.end());
}
using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
using mapperlist_const_range =
llvm::iterator_range<mapperlist_const_iterator>;
mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
mapperlist_const_iterator mapperlist_begin() const {
return getUDMapperRefs().begin();
}
mapperlist_const_iterator mapperlist_end() const {
return getUDMapperRefs().end();
}
mapperlist_range mapperlists() {
return mapperlist_range(mapperlist_begin(), mapperlist_end());
}
mapperlist_const_range mapperlists() const {
return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
}
};
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
private llvm::TrailingObjects<
OMPMapClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
private:
/// Map-type-modifiers for the 'map' clause.
OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};
/// Location of map-type-modifiers for the 'map' clause.
SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers];
/// Map type for the 'map' clause.
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
/// Is this an implicit map type or not.
bool MapTypeIsImplicit = false;
/// Location of the map type.
SourceLocation MapLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Build a clause for \a NumVars listed expressions, \a
/// NumUniqueDeclarations declarations, \a NumComponentLists total component
/// lists, and \a NumComponents total expression components.
///
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Locations of map-type-modifiers.
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param MapType Map type.
/// \param MapTypeIsImplicit Map type is inferred implicitly.
/// \param MapLoc Location of the map type.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
SourceLocation MapLoc, const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes,
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo),
MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
"Unexpected number of map type modifiers.");
llvm::copy(MapModifiers, std::begin(MapTypeModifiers));
assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
MapModifiersLoc.size() &&
"Unexpected number of map type modifier locations.");
llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes,
/*SupportsMapper=*/true) {}
/// Set map-type-modifier for the clause.
///
/// \param I index for map-type-modifier.
/// \param T map-type-modifier for the clause.
void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
assert(I < NumberOfOMPMapClauseModifiers &&
"Unexpected index to store map type modifier, exceeds array size.");
MapTypeModifiers[I] = T;
}
/// Set location for the map-type-modifier.
///
/// \param I index for map-type-modifier location.
/// \param TLoc map-type-modifier location.
void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
assert(I < NumberOfOMPMapClauseModifiers &&
"Index to store map type modifier location exceeds array size.");
MapTypeModifiersLoc[I] = TLoc;
}
/// Set type for the clause.
///
/// \param T Type for the clause.
void setMapType(OpenMPMapClauseKind T) { MapType = T; }
/// Set type location.
///
/// \param TLoc Type location.
void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Location of map-type-modifiers.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
/// \param Type Map type.
/// \param TypeIsImplicit Map type is inferred implicitly.
/// \param TypeLoc Location of the map type.
static OMPMapClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);
/// Creates an empty clause with the place for \a NumVars original
/// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
/// lists, and \a NumComponents expression components.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPMapClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
/// Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
/// Is this an implicit map type?
/// We have to capture 'IsMapTypeImplicit' from the parser for more
/// informative error messages. It helps distinguish map(r) from
/// map(tofrom: r), which is important to print more helpful error
/// messages for some target directives.
bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
/// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
///
/// \param Cnt index for map-type-modifier.
OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMapClauseModifiers &&
"Requested modifier exceeds the total number of modifiers.");
return MapTypeModifiers[Cnt];
}
/// Fetches the map-type-modifier location at 'Cnt' index of array of
/// modifiers' locations.
///
/// \param Cnt index for map-type-modifier location.
SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMapClauseModifiers &&
"Requested modifier location exceeds total number of modifiers.");
return MapTypeModifiersLoc[Cnt];
}
/// Fetches ArrayRef of map-type-modifiers.
ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
return llvm::makeArrayRef(MapTypeModifiers);
}
/// Fetches ArrayRef of location of map-type-modifiers.
ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
return llvm::makeArrayRef(MapTypeModifiersLoc);
}
/// Fetches location of clause mapping kind.
SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
child_range children() {
return child_range(
reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPMapClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom)
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
auto Children = const_cast<OMPMapClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_map;
}
};
/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// NumTeams number.
Stmt *NumTeams = nullptr;
/// Set the NumTeams number.
///
/// \param E NumTeams number.
void setNumTeams(Expr *E) { NumTeams = E; }
public:
/// Build 'num_teams' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPNumTeamsClause()
: OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return NumTeams number.
Expr *getNumTeams() { return cast<Expr>(NumTeams); }
/// Return NumTeams number.
Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
const_child_range children() const {
return const_child_range(&NumTeams, &NumTeams + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_num_teams;
}
};
/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// ThreadLimit number.
Stmt *ThreadLimit = nullptr;
/// Set the ThreadLimit number.
///
/// \param E ThreadLimit number.
void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
/// Build 'thread_limit' clause.
///
/// \param E Expression associated with this clause.
/// \param HelperE Helper Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPThreadLimitClause(Expr *E, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPThreadLimitClause()
: OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return ThreadLimit number.
Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
/// Return ThreadLimit number.
Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
const_child_range children() const {
return const_child_range(&ThreadLimit, &ThreadLimit + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_thread_limit;
}
};
/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Priority number.
Stmt *Priority = nullptr;
/// Set the Priority number.
///
/// \param E Priority number.
void setPriority(Expr *E) { Priority = E; }
public:
/// Build 'priority' clause.
///
/// \param Priority Expression associated with this clause.
/// \param HelperPriority Helper priority for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
setPreInitStmt(HelperPriority, CaptureRegion);
}
/// Build an empty clause.
OMPPriorityClause()
: OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return Priority number.
Expr *getPriority() { return cast<Expr>(Priority); }
/// Return Priority number.
Expr *getPriority() const { return cast<Expr>(Priority); }
child_range children() { return child_range(&Priority, &Priority + 1); }
const_child_range children() const {
return const_child_range(&Priority, &Priority + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_priority;
}
};
/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Safe iteration space distance.
Stmt *Grainsize = nullptr;
/// Set safelen.
void setGrainsize(Expr *Size) { Grainsize = Size; }
public:
/// Build 'grainsize' clause.
///
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper grainsize for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPGrainsizeClause()
: OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return safe iteration space distance.
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
const_child_range children() const {
return const_child_range(&Grainsize, &Grainsize + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_grainsize;
}
};
/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
/// Build 'nogroup' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPNogroupClause()
: OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_nogroup;
}
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Safe iteration space distance.
Stmt *NumTasks = nullptr;
/// Set safelen.
void setNumTasks(Expr *Size) { NumTasks = Size; }
public:
/// Build 'num_tasks' clause.
///
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper grainsize for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
/// Build an empty clause.
explicit OMPNumTasksClause()
: OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return safe iteration space distance.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
const_child_range children() const {
return const_child_range(&NumTasks, &NumTasks + 1);
}
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
}
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Hint expression of the 'hint' clause.
Stmt *Hint = nullptr;
/// Set hint expression.
void setHint(Expr *H) { Hint = H; }
public:
/// Build 'hint' clause with expression \a Hint.
///
/// \param Hint Hint expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
Hint(Hint) {}
/// Build an empty clause.
OMPHintClause()
: OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns number of threads.
Expr *getHint() const { return cast_or_null<Expr>(Hint); }
child_range children() { return child_range(&Hint, &Hint + 1); }
const_child_range children() const {
return const_child_range(&Hint, &Hint + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_hint;
}
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'schedule' clause.
OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;
/// Start location of the schedule kind in source code.
SourceLocation KindLoc;
/// Location of ',' (if any).
SourceLocation CommaLoc;
/// Chunk size.
Expr *ChunkSize = nullptr;
/// Set schedule kind.
///
/// \param K Schedule kind.
void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
/// Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }
/// Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }
public:
/// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
/// size expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind DistSchedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation KLoc, SourceLocation CommaLoc,
SourceLocation EndLoc,
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
Stmt *HelperChunkSize)
: OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc),
OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
setPreInitStmt(HelperChunkSize);
}
/// Build an empty clause.
explicit OMPDistScheduleClause()
: OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(),
SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Get kind of the clause.
OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getDistScheduleKindLoc() { return KindLoc; }
/// Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }
/// Get chunk size.
Expr *getChunkSize() { return ChunkSize; }
/// Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}
const_child_range children() const {
auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_dist_schedule;
}
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of kind
/// 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Modifiers for 'defaultmap' clause.
OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;
/// Locations of modifiers.
SourceLocation ModifierLoc;
/// A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;
/// Start location of the defaultmap kind in source code.
SourceLocation KindLoc;
/// Set defaultmap kind.
///
/// \param K Defaultmap kind.
void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }
/// Set the defaultmap modifier.
///
/// \param M Defaultmap modifier.
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
Modifier = M;
}
/// Set location of the defaultmap modifier.
void setDefaultmapModifierLoc(SourceLocation Loc) {
ModifierLoc = Loc;
}
/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Set defaultmap kind start location.
///
/// \param KLoc Defaultmap kind location.
void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
public:
/// Build 'defaultmap' clause with defaultmap kind \a Kind
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param EndLoc Ending location of the clause.
/// \param Kind Defaultmap kind.
/// \param M The modifier applied to 'defaultmap' clause.
/// \param MLoc Location of the modifier
OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation MLoc, SourceLocation KLoc,
SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
OpenMPDefaultmapClauseModifier M)
: OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc),
LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind),
KindLoc(KLoc) {}
/// Build an empty clause.
explicit OMPDefaultmapClause()
: OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(),
SourceLocation()) {}
/// Get kind of the clause.
OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }
/// Get the modifier of the clause.
OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
return Modifier;
}
/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }
/// Get kind location.
SourceLocation getDefaultmapKindLoc() { return KindLoc; }
/// Get the modifier location.
SourceLocation getDefaultmapModifierLoc() const {
return ModifierLoc;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_defaultmap;
}
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
private llvm::TrailingObjects<
OMPToClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Motion-modifiers for the 'to' clause.
OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};
/// Location of motion-modifiers for the 'to' clause.
SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];
/// Colon location.
SourceLocation ColonLoc;
/// Build clause with number of variables \a NumVars.
///
/// \param TheMotionModifiers Motion-modifiers.
/// \param TheMotionModifiersLoc Locations of motion-modifiers.
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
ArrayRef<SourceLocation> TheMotionModifiersLoc,
NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo) {
assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
"Unexpected number of motion modifiers.");
llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));
assert(llvm::array_lengthof(MotionModifiersLoc) ==
TheMotionModifiersLoc.size() &&
"Unexpected number of motion modifier locations.");
llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes,
/*SupportsMapper=*/true) {}
/// Set motion-modifier for the clause.
///
/// \param I index for motion-modifier.
/// \param T motion-modifier for the clause.
void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
assert(I < NumberOfOMPMotionModifiers &&
"Unexpected index to store motion modifier, exceeds array size.");
MotionModifiers[I] = T;
}
/// Set location for the motion-modifier.
///
/// \param I index for motion-modifier location.
/// \param TLoc motion-modifier location.
void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
assert(I < NumberOfOMPMotionModifiers &&
"Index to store motion modifier location exceeds array size.");
MotionModifiersLoc[I] = TLoc;
}
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param MotionModifiers Motion-modifiers.
/// \param MotionModifiersLoc Location of motion-modifiers.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc,
DeclarationNameInfo MapperId);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPToClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
/// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
///
/// \param Cnt index for motion-modifier.
OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMotionModifiers &&
"Requested modifier exceeds the total number of modifiers.");
return MotionModifiers[Cnt];
}
/// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
/// locations.
///
/// \param Cnt index for motion-modifier location.
SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMotionModifiers &&
"Requested modifier location exceeds total number of modifiers.");
return MotionModifiersLoc[Cnt];
}
/// Fetches ArrayRef of motion-modifiers.
ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
return llvm::makeArrayRef(MotionModifiers);
}
/// Fetches ArrayRef of location of motion-modifiers.
ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
return llvm::makeArrayRef(MotionModifiersLoc);
}
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPToClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_to;
}
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
: public OMPMappableExprListClause<OMPFromClause>,
private llvm::TrailingObjects<
OMPFromClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Motion-modifiers for the 'from' clause.
OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = {
OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown};
/// Location of motion-modifiers for the 'from' clause.
SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers];
/// Colon location.
SourceLocation ColonLoc;
/// Build clause with number of variables \a NumVars.
///
/// \param TheMotionModifiers Motion-modifiers.
/// \param TheMotionModifiersLoc Locations of motion-modifiers.
/// \param MapperQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperIdInfo The identifier of associated user-defined mapper.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers,
ArrayRef<SourceLocation> TheMotionModifiersLoc,
NestedNameSpecifierLoc MapperQualifierLoc,
DeclarationNameInfo MapperIdInfo,
const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo) {
assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
"Unexpected number of motion modifiers.");
llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));
assert(llvm::array_lengthof(MotionModifiersLoc) ==
TheMotionModifiersLoc.size() &&
"Unexpected number of motion modifier locations.");
llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(),
Sizes, /*SupportsMapper=*/true) {}
/// Set motion-modifier for the clause.
///
/// \param I index for motion-modifier.
/// \param T motion-modifier for the clause.
void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) {
assert(I < NumberOfOMPMotionModifiers &&
"Unexpected index to store motion modifier, exceeds array size.");
MotionModifiers[I] = T;
}
/// Set location for the motion-modifier.
///
/// \param I index for motion-modifier location.
/// \param TLoc motion-modifier location.
void setMotionModifierLoc(unsigned I, SourceLocation TLoc) {
assert(I < NumberOfOMPMotionModifiers &&
"Index to store motion modifier location exceeds array size.");
MotionModifiersLoc[I] = TLoc;
}
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param MotionModifiers Motion-modifiers.
/// \param MotionModifiersLoc Location of motion-modifiers.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
static OMPFromClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
ArrayRef<Expr *> UDMapperRefs,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPFromClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
/// Fetches the motion-modifier at 'Cnt' index of array of modifiers.
///
/// \param Cnt index for motion-modifier.
OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMotionModifiers &&
"Requested modifier exceeds the total number of modifiers.");
return MotionModifiers[Cnt];
}
/// Fetches the motion-modifier location at 'Cnt' index of array of modifiers'
/// locations.
///
/// \param Cnt index for motion-modifier location.
SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY {
assert(Cnt < NumberOfOMPMotionModifiers &&
"Requested modifier location exceeds total number of modifiers.");
return MotionModifiersLoc[Cnt];
}
/// Fetches ArrayRef of motion-modifiers.
ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
return llvm::makeArrayRef(MotionModifiers);
}
/// Fetches ArrayRef of location of motion-modifiers.
ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
return llvm::makeArrayRef(MotionModifiersLoc);
}
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFromClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_from;
}
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
: public OMPMappableExprListClause<OMPUseDevicePtrClause>,
private llvm::TrailingObjects<
OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) {
}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr,
OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return 3 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
/// Sets the list of references to private copies with initializers for new
/// private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for new
/// private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new private
/// variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new private
/// variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param PrivateVars Expressions referring to private copies.
/// \param Inits Expressions referring to private copy initializers.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPUseDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPUseDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr;
}
};
/// This represents clause 'use_device_addr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_addr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_addr' with the variables 'a' and 'b'.
class OMPUseDeviceAddrClause final
: public OMPMappableExprListClause<OMPUseDeviceAddrClause>,
private llvm::TrailingObjects<
OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs,
Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr,
OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPUseDeviceAddrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPUseDeviceAddrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_use_device_addr;
}
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr,
OMPVarListLocTy(), Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPIsDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPIsDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr;
}
};
/// This represents clause 'nontemporal' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp simd nontemporal(a)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'nontemporal' for
/// the variable 'a'.
class OMPNontemporalClause final
: public OMPVarListClause<OMPNontemporalClause>,
private llvm::TrailingObjects<OMPNontemporalClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal,
StartLoc, LParenLoc, EndLoc, N) {
}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPNontemporalClause(unsigned N)
: OMPVarListClause<OMPNontemporalClause>(
llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Get the list of privatied copies if the member expression was captured by
/// one of the privatization clauses.
MutableArrayRef<Expr *> getPrivateRefs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateRefs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPNontemporalClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Sets the list of references to private copies created in private clauses.
/// \param VL List of references.
void setPrivateRefs(ArrayRef<Expr *> VL);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPNontemporalClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range private_refs() {
return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
reinterpret_cast<Stmt **>(getPrivateRefs().end()));
}
const_child_range private_refs() const {
auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
}
};
/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// A kind of the 'default' clause.
OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
/// Set kind of the clause.
///
/// \param K Argument of clause.
void setKind(OpenMPOrderClauseKind K) { Kind = K; }
/// Set argument location.
///
/// \param KLoc Argument location.
void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
public:
/// Build 'order' clause with argument \p A ('concurrent').
///
/// \param A Argument of the clause ('concurrent').
/// \param ALoc Starting location of the argument.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc),
LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
/// Build an empty clause.
OMPOrderClause()
: OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns kind of the clause.
OpenMPOrderClauseKind getKind() const { return Kind; }
/// Returns location of clause kind.
SourceLocation getKindKwLoc() const { return KindKwLoc; }
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_order;
}
};
/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has 'destroy' clause.
class OMPDestroyClause final : public OMPClause {
public:
/// Build 'destroy' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPDestroyClause()
: OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_destroy;
}
};
/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
/// #pragma omp task detach(evt)
/// \endcode
/// In this example directive '#pragma omp detach' has simple 'detach' clause
/// with the variable 'evt'.
class OMPDetachClause final : public OMPClause {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Expression of the 'detach' clause.
Stmt *Evt = nullptr;
/// Set condition.
void setEventHandler(Expr *E) { Evt = E; }
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
public:
/// Build 'detach' clause with event-handler \a Evt.
///
/// \param Evt Event handler expression.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
LParenLoc(LParenLoc), Evt(Evt) {}
/// Build an empty clause.
OMPDetachClause()
: OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns event-handler expression.
Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }
child_range children() { return child_range(&Evt, &Evt + 1); }
const_child_range children() const {
return const_child_range(&Evt, &Evt + 1);
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_detach;
}
};
/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
: public OMPVarListClause<OMPInclusiveClause>,
private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
StartLoc, LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPInclusiveClause(unsigned N)
: OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
static OMPInclusiveClause *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPInclusiveClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_inclusive;
}
};
/// This represents clause 'exclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan exclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'exclusive'
/// with the variables 'a' and 'b'.
class OMPExclusiveClause final
: public OMPVarListClause<OMPExclusiveClause>,
private llvm::TrailingObjects<OMPExclusiveClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
StartLoc, LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPExclusiveClause(unsigned N)
: OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
static OMPExclusiveClause *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPExclusiveClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_exclusive;
}
};
/// This represents clause 'uses_allocators' in the '#pragma omp target'-based
/// directives.
///
/// \code
/// #pragma omp target uses_allocators(default_allocator, my_allocator(traits))
/// \endcode
/// In this example directive '#pragma omp target' has clause 'uses_allocators'
/// with the allocators 'default_allocator' and user-defined 'my_allocator'.
class OMPUsesAllocatorsClause final
: public OMPClause,
private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *,
SourceLocation> {
public:
/// Data for list of allocators.
struct Data {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
private:
friend class OMPClauseReader;
friend TrailingObjects;
enum class ExprOffsets {
Allocator,
AllocatorTraits,
Total,
};
enum class ParenLocsOffsets {
LParen,
RParen,
Total,
};
/// Location of '('.
SourceLocation LParenLoc;
/// Total number of allocators in the clause.
unsigned NumOfAllocators = 0;
/// Build clause.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of allocators asssociated with the clause.
OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
LParenLoc(LParenLoc), NumOfAllocators(N) {}
/// Build an empty clause.
/// \param N Number of allocators asssociated with the clause.
///
explicit OMPUsesAllocatorsClause(unsigned N)
: OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
SourceLocation()),
NumOfAllocators(N) {}
unsigned numTrailingObjects(OverloadToken<Expr *>) const {
return NumOfAllocators * static_cast<int>(ExprOffsets::Total);
}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Sets the allocators data for the clause.
void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data);
public:
/// Creates clause with a list of allocators \p Data.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param Data List of allocators.
static OMPUsesAllocatorsClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data);
/// Creates an empty clause with the place for \p N allocators.
///
/// \param C AST context.
/// \param N The number of allocators.
static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Returns number of allocators associated with the clause.
unsigned getNumberOfAllocators() const { return NumOfAllocators; }
/// Returns data for the specified allocator.
OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const;
// Iterators
child_range children() {
Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
return child_range(Begin, Begin + NumOfAllocators *
static_cast<int>(ExprOffsets::Total));
}
const_child_range children() const {
Stmt *const *Begin =
reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
return const_child_range(
Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total));
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_uses_allocators;
}
};
/// This represents clause 'affinity' in the '#pragma omp task'-based
/// directives.
///
/// \code
/// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i])
/// \endcode
/// In this example directive '#pragma omp task' has clause 'affinity' with the
/// affinity modifer 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]'
/// and 'c[i]'.
class OMPAffinityClause final
: public OMPVarListClause<OMPAffinityClause>,
private llvm::TrailingObjects<OMPAffinityClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':' symbol.
SourceLocation ColonLoc;
/// Build clause.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param N Number of locators asssociated with the clause.
OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
/// \param N Number of locators asssociated with the clause.
///
explicit OMPAffinityClause(unsigned N)
: OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity,
SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Sets the affinity modifier for the clause, if any.
void setModifier(Expr *E) {
getTrailingObjects<Expr *>()[varlist_size()] = E;
}
/// Sets the location of ':' symbol.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a modifier a list of locator items.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param Locators List of locator items.
static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// Creates an empty clause with the place for \p N locator items.
///
/// \param C AST context.
/// \param N The number of locator items.
static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Gets affinity modifier.
Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; }
Expr *getModifier() const {
return getTrailingObjects<Expr *>()[varlist_size()];
}
/// Gets the location of ':' symbol.
SourceLocation getColonLoc() const { return ColonLoc; }
// Iterators
child_range children() {
int Offset = getModifier() ? 1 : 0;
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end() + Offset));
}
const_child_range children() const {
auto Children = const_cast<OMPAffinityClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == llvm::omp::OMPC_affinity;
}
};
/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
#define PTR(CLASS) Ptr<CLASS>
#define DISPATCH(CLASS) \
return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "llvm/Frontend/OpenMP/OMPKinds.def"
RetTy Visit(PTR(OMPClause) S) {
// Top switch clause: visit each OMPClause.
switch (S->getClauseKind()) {
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
case llvm::omp::Clause::Enum: \
return Visit##Class(static_cast<PTR(Class)>(S));
#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
case llvm::omp::Clause::Enum: \
break;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
default:
break;
}
}
// Base case, ignore it. :)
RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};
template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>;
template <class ImplClass, typename RetTy = void>
class OMPClauseVisitor
: public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {};
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
raw_ostream &OS;
const PrintingPolicy &Policy;
/// Process clauses with list of variables.
template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
/// Process motion clauses.
template <typename T> void VisitOMPMotionClause(T *Node);
public:
OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
: OS(OS), Policy(Policy) {}
#define OMP_CLAUSE_CLASS(Enum, Str, Class) \
void Visit##Class(Class *S);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};
struct OMPTraitProperty {
llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid;
/// The raw string as we parsed it. This is needed for the `isa` trait set
/// (which accepts anything) and (later) extensions.
StringRef RawString;
};
struct OMPTraitSelector {
Expr *ScoreOrCondition = nullptr;
llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid;
llvm::SmallVector<OMPTraitProperty, 1> Properties;
};
struct OMPTraitSet {
llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid;
llvm::SmallVector<OMPTraitSelector, 2> Selectors;
};
/// Helper data structure representing the traits in a match clause of an
/// `declare variant` or `metadirective`. The outer level is an ordered
/// collection of selector sets, each with an associated kind and an ordered
/// collection of selectors. A selector has a kind, an optional score/condition,
/// and an ordered collection of properties.
class OMPTraitInfo {
/// Private constructor accesible only by ASTContext.
OMPTraitInfo() {}
friend class ASTContext;
public:
/// Reconstruct a (partial) OMPTraitInfo object from a mangled name.
OMPTraitInfo(StringRef MangledName);
/// The outermost level of selector sets.
llvm::SmallVector<OMPTraitSet, 2> Sets;
bool anyScoreOrCondition(
llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) {
return llvm::any_of(Sets, [&](OMPTraitSet &Set) {
return llvm::any_of(
Set.Selectors, [&](OMPTraitSelector &Selector) {
return Cond(Selector.ScoreOrCondition,
/* IsScore */ Selector.Kind !=
llvm::omp::TraitSelector::user_condition);
});
});
}
/// Create a variant match info object from this trait info object. While the
/// former is a flat representation the actual main difference is that the
/// latter uses clang::Expr to store the score/condition while the former is
/// independent of clang. Thus, expressions and conditions are evaluated in
/// this method.
void getAsVariantMatchInfo(ASTContext &ASTCtx,
llvm::omp::VariantMatchInfo &VMI) const;
/// Return a string representation identifying this context selector.
std::string getMangledName() const;
/// Check the extension trait \p TP is active.
bool isExtensionActive(llvm::omp::TraitProperty TP) {
for (const OMPTraitSet &Set : Sets) {
if (Set.Kind != llvm::omp::TraitSet::implementation)
continue;
for (const OMPTraitSelector &Selector : Set.Selectors) {
if (Selector.Kind != llvm::omp::TraitSelector::implementation_extension)
continue;
for (const OMPTraitProperty &Property : Selector.Properties) {
if (Property.Kind == TP)
return true;
}
}
}
return false;
}
/// Print a human readable representation into \p OS.
void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
};
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI);
llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);
/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
TargetOMPContext(ASTContext &ASTCtx,
std::function<void(StringRef)> &&DiagUnknownTrait,
const FunctionDecl *CurrentFunctionDecl);
virtual ~TargetOMPContext() = default;
/// See llvm::omp::OMPContext::matchesISATrait
bool matchesISATrait(StringRef RawString) const override;
private:
std::function<bool(StringRef)> FeatureValidityCheck;
std::function<void(StringRef)> DiagUnknownTrait;
llvm::StringMap<bool> FeatureMap;
};
/// Contains data for OpenMP directives: clauses, children
/// expressions/statements (helpers for codegen) and associated statement, if
/// any.
class OMPChildren final
: private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> {
friend TrailingObjects;
friend class OMPClauseReader;
friend class OMPExecutableDirective;
template <typename T> friend class OMPDeclarativeDirective;
/// Numbers of clauses.
unsigned NumClauses = 0;
/// Number of child expressions/stmts.
unsigned NumChildren = 0;
/// true if the directive has associated statement.
bool HasAssociatedStmt = false;
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
return NumClauses;
}
OMPChildren() = delete;
OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt)
: NumClauses(NumClauses), NumChildren(NumChildren),
HasAssociatedStmt(HasAssociatedStmt) {}
static size_t size(unsigned NumClauses, bool HasAssociatedStmt,
unsigned NumChildren);
static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses);
static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S,
unsigned NumChildren = 0);
static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses,
bool HasAssociatedStmt = false,
unsigned NumChildren = 0);
public:
unsigned getNumClauses() const { return NumClauses; }
unsigned getNumChildren() const { return NumChildren; }
bool hasAssociatedStmt() const { return HasAssociatedStmt; }
/// Set associated statement.
void setAssociatedStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[NumChildren] = S;
}
void setChildren(ArrayRef<Stmt *> Children);
/// Sets the list of variables for this clause.
///
/// \param Clauses The list of clauses for the directive.
///
void setClauses(ArrayRef<OMPClause *> Clauses);
/// Returns statement associated with the directive.
const Stmt *getAssociatedStmt() const {
return const_cast<OMPChildren *>(this)->getAssociatedStmt();
}
Stmt *getAssociatedStmt() {
assert(HasAssociatedStmt &&
"Expected directive with the associated statement.");
return getTrailingObjects<Stmt *>()[NumChildren];
}
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
NumClauses);
}
ArrayRef<OMPClause *> getClauses() const {
return const_cast<OMPChildren *>(this)->getClauses();
}
/// Returns the captured statement associated with the
/// component region within the (combined) directive.
///
/// \param RegionKind Component region kind.
const CapturedStmt *
getCapturedStmt(OpenMPDirectiveKind RegionKind,
ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
assert(llvm::any_of(
CaptureRegions,
[=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
"RegionKind not found in OpenMP CaptureRegions.");
auto *CS = cast<CapturedStmt>(getAssociatedStmt());
for (auto ThisCaptureRegion : CaptureRegions) {
if (ThisCaptureRegion == RegionKind)
return CS;
CS = cast<CapturedStmt>(CS->getCapturedStmt());
}
llvm_unreachable("Incorrect RegionKind specified for directive.");
}
/// Get innermost captured statement for the construct.
CapturedStmt *
getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) {
assert(hasAssociatedStmt() && "Must have associated captured statement.");
assert(!CaptureRegions.empty() &&
"At least one captured statement must be provided.");
auto *CS = cast<CapturedStmt>(getAssociatedStmt());
for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
CS = cast<CapturedStmt>(CS->getCapturedStmt());
return CS;
}
const CapturedStmt *
getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt(
CaptureRegions);
}
MutableArrayRef<Stmt *> getChildren();
ArrayRef<Stmt *> getChildren() const {
return const_cast<OMPChildren *>(this)->getChildren();
}
Stmt *getRawStmt() {
assert(HasAssociatedStmt &&
"Expected directive with the associated statement.");
if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) {
Stmt *S = nullptr;
do {
S = CS->getCapturedStmt();
CS = dyn_cast<CapturedStmt>(S);
} while (CS);
return S;
}
return getAssociatedStmt();
}
const Stmt *getRawStmt() const {
return const_cast<OMPChildren *>(this)->getRawStmt();
}
Stmt::child_range getAssociatedStmtAsRange() {
if (!HasAssociatedStmt)
return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator());
return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren],
&getTrailingObjects<Stmt *>()[NumChildren + 1]);
}
};
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
rose_livenessTest.c | #include "omp.h"
typedef double real8;
void foo(real8 *y,real8 *d__,real8 *d11,real8 *d12,real8 *d13,real8 *d22,real8 *d23,real8 *d33,real8 *m,int *nell,real8 *p,int t,int flagB,int flagA,int ub)
{
int l;
int nel;
int t1 = t - 1;
if (flagB == 0) {
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
int l36 = l * 36;
real8 h12 = m[(l8 + 0) * 4 + 1];
real8 h13 = m[(l8 + 0) * 4 + 2];
real8 h14 = m[(l8 + 0) * 4 + 3];
real8 h22 = m[(l8 + 1) * 4 + 1];
real8 h23 = m[(l8 + 1) * 4 + 2];
real8 h24 = m[(l8 + 1) * 4 + 3];
real8 h32 = m[(l8 + 2) * 4 + 1];
real8 h33 = m[(l8 + 2) * 4 + 2];
real8 h34 = m[(l8 + 2) * 4 + 3];
real8 h42 = m[(l8 + 3) * 4 + 1];
real8 h43 = m[(l8 + 3) * 4 + 2];
real8 h44 = m[(l8 + 3) * 4 + 3];
real8 h52 = m[(l8 + 4) * 4 + 1];
real8 h53 = m[(l8 + 4) * 4 + 2];
real8 h54 = m[(l8 + 4) * 4 + 3];
real8 h62 = m[(l8 + 5) * 4 + 1];
real8 h63 = m[(l8 + 5) * 4 + 2];
real8 h64 = m[(l8 + 5) * 4 + 3];
real8 h72 = m[(l8 + 6) * 4 + 1];
real8 h73 = m[(l8 + 6) * 4 + 2];
real8 h74 = m[(l8 + 6) * 4 + 3];
real8 h82 = m[(l8 + 7) * 4 + 1];
real8 h83 = m[(l8 + 7) * 4 + 2];
real8 h84 = m[(l8 + 7) * 4 + 3];
real8 ddd = d__[l];
y[l36 + 0] += ddd * (h12 * h12 + h13 * h13 + h14 * h14);
y[l36 + 1] += ddd * (h12 * h22 + h13 * h23 + h14 * h24);
y[l36 + 2] += ddd * (h22 * h22 + h23 * h23 + h24 * h24);
y[l36 + 3] += ddd * (h12 * h32 + h13 * h33 + h14 * h34);
y[l36 + 4] += ddd * (h22 * h32 + h23 * h33 + h24 * h34);
y[l36 + 5] += ddd * (h32 * h32 + h33 * h33 + h34 * h34);
y[l36 + 6] += ddd * (h12 * h42 + h13 * h43 + h14 * h44);
y[l36 + 7] += ddd * (h22 * h42 + h23 * h43 + h24 * h44);
y[l36 + 8] += ddd * (h32 * h42 + h33 * h43 + h34 * h44);
y[l36 + 9] += ddd * (h42 * h42 + h43 * h43 + h44 * h44);
y[l36 + 10] += ddd * (h12 * h52 + h13 * h53 + h14 * h54);
y[l36 + 11] += ddd * (h22 * h52 + h23 * h53 + h24 * h54);
y[l36 + 12] += ddd * (h32 * h52 + h33 * h53 + h34 * h54);
y[l36 + 13] += ddd * (h42 * h52 + h43 * h53 + h44 * h54);
y[l36 + 14] += ddd * (h52 * h52 + h53 * h53 + h54 * h54);
y[l36 + 15] += ddd * (h12 * h62 + h13 * h63 + h14 * h64);
y[l36 + 16] += ddd * (h22 * h62 + h23 * h63 + h24 * h64);
y[l36 + 17] += ddd * (h32 * h62 + h33 * h63 + h34 * h64);
y[l36 + 18] += ddd * (h42 * h62 + h43 * h63 + h44 * h64);
y[l36 + 19] += ddd * (h52 * h62 + h53 * h63 + h54 * h64);
y[l36 + 20] += ddd * (h62 * h62 + h63 * h63 + h64 * h64);
y[l36 + 21] += ddd * (h12 * h72 + h13 * h73 + h14 * h74);
y[l36 + 22] += ddd * (h22 * h72 + h23 * h73 + h24 * h74);
y[l36 + 23] += ddd * (h32 * h72 + h33 * h73 + h34 * h74);
y[l36 + 24] += ddd * (h42 * h72 + h43 * h73 + h44 * h74);
y[l36 + 25] += ddd * (h52 * h72 + h53 * h73 + h54 * h74);
y[l36 + 26] += ddd * (h62 * h72 + h63 * h73 + h64 * h74);
y[l36 + 27] += ddd * (h72 * h72 + h73 * h73 + h74 * h74);
y[l36 + 28] += ddd * (h12 * h82 + h13 * h83 + h14 * h84);
y[l36 + 29] += ddd * (h22 * h82 + h23 * h83 + h24 * h84);
y[l36 + 30] += ddd * (h32 * h82 + h33 * h83 + h34 * h84);
y[l36 + 31] += ddd * (h42 * h82 + h43 * h83 + h44 * h84);
y[l36 + 32] += ddd * (h52 * h82 + h53 * h83 + h54 * h84);
y[l36 + 33] += ddd * (h62 * h82 + h63 * h83 + h64 * h84);
y[l36 + 34] += ddd * (h72 * h82 + h73 * h83 + h74 * h84);
y[l36 + 35] += ddd * (h82 * h82 + h83 * h83 + h84 * h84);
}
if (flagA > 0) {
#pragma omp parallel for private (nel,l) firstprivate (ub,t1)
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
real8 h1 = m[(t1 + l8) * 4 + 1];
real8 h2 = m[(t1 + l8) * 4 + 2];
real8 h3 = m[(t1 + l8) * 4 + 3];
nel = nell[l];
p[nell[l]] += d__[l] * 64. * (h1 * h1 + h2 * h2 + h3 * h3);
}
}
}
else {
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
int l36 = l * 36;
real8 d_11 = d11[l];
real8 d_12 = d12[l];
real8 d_13 = d13[l];
real8 d_22 = d22[l];
real8 d_23 = d23[l];
real8 d_33 = d33[l];
real8 h12 = m[(l8 + 0) * 4 + 1];
real8 h13 = m[(l8 + 0) * 4 + 2];
real8 h14 = m[(l8 + 0) * 4 + 3];
real8 h22 = m[(l8 + 1) * 4 + 1];
real8 h23 = m[(l8 + 1) * 4 + 2];
real8 h24 = m[(l8 + 1) * 4 + 3];
real8 h32 = m[(l8 + 2) * 4 + 1];
real8 h33 = m[(l8 + 2) * 4 + 2];
real8 h34 = m[(l8 + 2) * 4 + 3];
real8 h42 = m[(l8 + 3) * 4 + 1];
real8 h43 = m[(l8 + 3) * 4 + 2];
real8 h44 = m[(l8 + 3) * 4 + 3];
real8 h52 = m[(l8 + 4) * 4 + 1];
real8 h53 = m[(l8 + 4) * 4 + 2];
real8 h54 = m[(l8 + 4) * 4 + 3];
real8 h62 = m[(l8 + 5) * 4 + 1];
real8 h63 = m[(l8 + 5) * 4 + 2];
real8 h64 = m[(l8 + 5) * 4 + 3];
real8 h72 = m[(l8 + 6) * 4 + 1];
real8 h73 = m[(l8 + 6) * 4 + 2];
real8 h74 = m[(l8 + 6) * 4 + 3];
real8 h82 = m[(l8 + 7) * 4 + 1];
real8 h83 = m[(l8 + 7) * 4 + 2];
real8 h84 = m[(l8 + 7) * 4 + 3];
y[l36 + 0] = y[l36 + 0] + h12 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h13 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h14 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 1] = y[l36 + 1] + h22 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h23 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h24 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 2] = y[l36 + 2] + h22 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h23 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h24 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 3] = y[l36 + 3] + h32 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h33 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h34 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 4] = y[l36 + 4] + h32 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h33 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h34 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 5] = y[l36 + 5] + h32 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h33 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h34 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 6] = y[l36 + 6] + h42 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h43 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h44 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 7] = y[l36 + 7] + h42 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h43 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h44 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 8] = y[l36 + 8] + h42 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h43 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h44 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 9] = y[l36 + 9] + h42 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h43 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h44 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 10] = y[l36 + 10] + h52 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h53 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h54 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 11] = y[l36 + 11] + h52 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h53 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h54 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 12] = y[l36 + 12] + h52 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h53 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h54 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 13] = y[l36 + 13] + h52 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h53 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h54 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 14] = y[l36 + 14] + h52 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h53 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h54 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 15] = y[l36 + 15] + h62 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h63 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h64 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 16] = y[l36 + 16] + h62 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h63 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h64 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 17] = y[l36 + 17] + h62 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h63 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h64 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 18] = y[l36 + 18] + h62 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h63 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h64 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 19] = y[l36 + 19] + h62 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h63 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h64 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 20] = y[l36 + 20] + h62 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h63 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h64 * (d_13 * h62 + d_23 * h63 + d_33 * h64);
y[l36 + 21] = y[l36 + 21] + h72 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h73 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h74 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 22] = y[l36 + 22] + h72 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h73 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h74 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 23] = y[l36 + 23] + h72 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h73 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h74 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 24] = y[l36 + 24] + h72 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h73 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h74 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 25] = y[l36 + 25] + h72 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h73 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h74 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 26] = y[l36 + 26] + h72 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h73 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h74 * (d_13 * h62 + d_23 * h63 + d_33 * h64);
y[l36 + 27] = y[l36 + 27] + h72 * (d_11 * h72 + d_12 * h73 + d_13 * h74) + h73 * (d_12 * h72 + d_22 * h73 + d_23 * h74) + h74 * (d_13 * h72 + d_23 * h73 + d_33 * h74);
y[l36 + 28] = y[l36 + 28] + h82 * (d_11 * h12 + d_12 * h13 + d_13 * h14) + h83 * (d_12 * h12 + d_22 * h13 + d_23 * h14) + h84 * (d_13 * h12 + d_23 * h13 + d_33 * h14);
y[l36 + 29] = y[l36 + 29] + h82 * (d_11 * h22 + d_12 * h23 + d_13 * h24) + h83 * (d_12 * h22 + d_22 * h23 + d_23 * h24) + h84 * (d_13 * h22 + d_23 * h23 + d_33 * h24);
y[l36 + 30] = y[l36 + 30] + h82 * (d_11 * h32 + d_12 * h33 + d_13 * h34) + h83 * (d_12 * h32 + d_22 * h33 + d_23 * h34) + h84 * (d_13 * h32 + d_23 * h33 + d_33 * h34);
y[l36 + 31] = y[l36 + 31] + h82 * (d_11 * h42 + d_12 * h43 + d_13 * h44) + h83 * (d_12 * h42 + d_22 * h43 + d_23 * h44) + h84 * (d_13 * h42 + d_23 * h43 + d_33 * h44);
y[l36 + 32] = y[l36 + 32] + h82 * (d_11 * h52 + d_12 * h53 + d_13 * h54) + h83 * (d_12 * h52 + d_22 * h53 + d_23 * h54) + h84 * (d_13 * h52 + d_23 * h53 + d_33 * h54);
y[l36 + 33] = y[l36 + 33] + h82 * (d_11 * h62 + d_12 * h63 + d_13 * h64) + h83 * (d_12 * h62 + d_22 * h63 + d_23 * h64) + h84 * (d_13 * h62 + d_23 * h63 + d_33 * h64);
y[l36 + 34] = y[l36 + 34] + h82 * (d_11 * h72 + d_12 * h73 + d_13 * h74) + h83 * (d_12 * h72 + d_22 * h73 + d_23 * h74) + h84 * (d_13 * h72 + d_23 * h73 + d_33 * h74);
y[l36 + 35] = y[l36 + 35] + h82 * (d_11 * h82 + d_12 * h83 + d_13 * h84) + h83 * (d_12 * h82 + d_22 * h83 + d_23 * h84) + h84 * (d_13 * h82 + d_23 * h83 + d_33 * h84);
}
if (flagA > 0) {
#pragma omp parallel for private (nel,l) firstprivate (ub,t1)
for (l = 0; l <= ub - 1; l += 1) {
int l8 = l * 8;
real8 h1 = m[(t1 + l8) * 4 + 1];
real8 h2 = m[(t1 + l8) * 4 + 2];
real8 h3 = m[(t1 + l8) * 4 + 3];
nel = nell[l];
p[nell[l]] += (h1 * (d11[l] * h1 + d12[l] * 2. * h2 + d13[l] * 2. * h3) + h2 * (d22[l] * h2 + d23[l] * 2. * h3) + h3 * d33[l] * h3) * 64.;
}
}
}
}
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. The fuzz member of
% image defines how much tolerance is acceptable to consider two colors as
% the same. For example, set fuzz to 10 and the color red at intensities of
% 100 and 102 respectively are now interpreted as the same color for the
% purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const DrawInfo *draw_info,const PixelInfo target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
const ssize_t y_offset,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define MaxStacksize 524288UL
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
CacheView
*floodplane_view,
*image_view;
Image
*floodplane_image;
MagickBooleanType
skip,
status;
MemoryInfo
*segment_info;
PixelInfo
fill_color,
pixel;
register SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if ((image->alpha_trait == UndefinedPixelTrait) &&
(draw_info->fill.alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(image,OpaqueAlpha,exception);
/*
Set floodfill state.
*/
floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
floodplane_image->alpha_trait=UndefinedPixelTrait;
floodplane_image->colorspace=GRAYColorspace;
(void) QueryColorCompliance("#000",AllCompliance,
&floodplane_image->background_color,exception);
(void) SetImageBackgroundColor(floodplane_image,exception);
segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
if (segment_info == (MemoryInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
/*
Push initial segment on stack.
*/
status=MagickTrue;
start=0;
s=segment_stack;
PushSegmentStack(y_offset,x_offset,x_offset,1);
PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
while (s > segment_stack)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
p+=x1*GetPixelChannels(image);
q+=x1*GetPixelChannels(floodplane_image);
for (x=x1; x >= 0; x--)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p-=GetPixelChannels(image);
q-=GetPixelChannels(floodplane_image);
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
if (x < (ssize_t) image->columns)
{
p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
x,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
if (status == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
if (x <= x2)
{
p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x <= x2; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
}
start=x;
} while (x <= x2);
}
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Tile fill color onto floodplane.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,p) != 0)
{
GetFillColor(draw_info,x,y,&fill_color,exception);
SetPixelViaPixelInfo(image,&fill_color,q);
}
p+=GetPixelChannels(floodplane_image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
floodplane_view=DestroyCacheView(floodplane_view);
image_view=DestroyCacheView(image_view);
segment_info=RelinquishVirtualMemory(segment_info);
floodplane_image=DestroyImage(floodplane_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transistion.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelInfo *start_color,
% const PixelInfo *stop_color,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread meathod: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
const GradientType type,const SpreadMethod method,const StopInfo *stops,
const size_t number_stops,ExceptionInfo *exception)
{
const char
*artifact;
DrawInfo
*draw_info;
GradientInfo
*gradient;
MagickBooleanType
status;
/*
Set gradient start-stop end points.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(stops != (const StopInfo *) NULL);
assert(number_stops > 0);
draw_info=AcquireDrawInfo();
gradient=(&draw_info->gradient);
gradient->type=type;
gradient->bounding_box.width=image->columns;
gradient->bounding_box.height=image->rows;
artifact=GetImageArtifact(image,"gradient:bounding-box");
if (artifact != (const char *) NULL)
(void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
artifact=GetImageArtifact(image,"gradient:direction");
if (artifact != (const char *) NULL)
{
GravityType
direction;
direction=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,artifact);
switch (direction)
{
case NorthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case WestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case EastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case SouthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->columns-1;
break;
}
case SouthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
default:
break;
}
}
artifact=GetImageArtifact(image,"gradient:angle");
if (artifact != (const char *) NULL)
gradient->angle=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"gradient:vector");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
&gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
&gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
gradient->gradient_vector.x2=0.0;
gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
artifact=GetImageArtifact(image,"gradient:center");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
&gradient->center.y);
artifact=GetImageArtifact(image,"gradient:angle");
if ((type == LinearGradient) && (artifact != (const char *) NULL))
{
double
sine,
cosine,
distance;
/*
Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
*/
sine=sin((double) DegreesToRadians(gradient->angle-90.0));
cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
distance=fabs((double) image->columns*cosine)+
fabs((double) image->rows*sine);
gradient->gradient_vector.x1=0.5*(image->columns-distance*cosine);
gradient->gradient_vector.y1=0.5*(image->rows-distance*sine);
gradient->gradient_vector.x2=0.5*(image->columns+distance*cosine);
gradient->gradient_vector.y2=0.5*(image->rows+distance*sine);
}
gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
gradient->radii.y=gradient->radii.x;
artifact=GetImageArtifact(image,"gradient:extent");
if (artifact != (const char *) NULL)
{
if (LocaleCompare(artifact,"Circle") == 0)
{
gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Diagonal") == 0)
{
gradient->radii.x=(double) (sqrt(image->columns*image->columns+
image->rows*image->rows))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Ellipse") == 0)
{
gradient->radii.x=(double) image->columns/2.0;
gradient->radii.y=(double) image->rows/2.0;
}
if (LocaleCompare(artifact,"Maximum") == 0)
{
gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Minimum") == 0)
{
gradient->radii.x=(double) (MagickMin(image->columns,image->rows))/
2.0;
gradient->radii.y=gradient->radii.x;
}
}
artifact=GetImageArtifact(image,"gradient:radii");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
&gradient->radii.y);
gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
gradient->spread=method;
/*
Define the gradient to fill between the stops.
*/
gradient->number_stops=number_stops;
gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
sizeof(*gradient->stops));
if (gradient->stops == (StopInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) CopyMagickMemory(gradient->stops,stops,(size_t) number_stops*
sizeof(*stops));
/*
Draw a gradient on the image.
*/
status=DrawGradientImage(image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
register ssize_t
i;
assert(histogram != (size_t **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (histogram[i] != (size_t *) NULL)
histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
histogram=(size_t **) RelinquishMagickMemory(histogram);
return(histogram);
}
static size_t **AcquireHistogramThreadSet(const size_t count)
{
register ssize_t
i;
size_t
**histogram,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram));
if (histogram == (size_t **) NULL)
return((size_t **) NULL);
(void) ResetMagickMemory(histogram,0,number_threads*sizeof(*histogram));
for (i=0; i < (ssize_t) number_threads; i++)
{
histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
if (histogram[i] == (size_t *) NULL)
return(DestroyHistogramThreadSet(histogram));
}
return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"
CacheView
*image_view,
*paint_view;
Image
*linear_image,
*paint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
**histograms,
width;
ssize_t
center,
y;
/*
Initialize painted image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
linear_image=CloneImage(image,0,0,MagickTrue,exception);
paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
{
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (paint_image != (Image *) NULL)
linear_image=DestroyImage(paint_image);
return((Image *) NULL);
}
if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
{
linear_image=DestroyImage(linear_image);
paint_image=DestroyImage(paint_image);
return((Image *) NULL);
}
histograms=AcquireHistogramThreadSet(NumberPaintBins);
if (histograms == (size_t **) NULL)
{
linear_image=DestroyImage(linear_image);
paint_image=DestroyImage(paint_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Oil paint image.
*/
status=MagickTrue;
progress=0;
center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
(width/2L)+GetPixelChannels(linear_image)*(width/2L);
image_view=AcquireVirtualCacheView(linear_image,exception);
paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(linear_image,paint_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register size_t
*histogram;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(width/2L),linear_image->columns+width,width,exception);
q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
histogram=histograms[GetOpenMPThreadId()];
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
register ssize_t
i,
u;
size_t
count;
ssize_t
j,
k,
n,
v;
/*
Assign most frequent color.
*/
k=0;
j=0;
count=0;
(void) ResetMagickMemory(histogram,0,NumberPaintBins* sizeof(*histogram));
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
linear_image,p+GetPixelChannels(linear_image)*(u+k))));
histogram[n]++;
if (histogram[n] > count)
{
j=k+u;
count=histogram[n];
}
}
k+=(ssize_t) (linear_image->columns+width);
}
for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
{
PixelChannel channel=GetPixelChannelChannel(linear_image,i);
PixelTrait traits=GetPixelChannelTraits(linear_image,channel);
PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
if ((traits == UndefinedPixelTrait) ||
(paint_traits == UndefinedPixelTrait))
continue;
if (((paint_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(linear_image,p) == 0))
{
SetPixelChannel(paint_image,channel,p[center+i],q);
continue;
}
SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
i],q);
}
p+=GetPixelChannels(linear_image);
q+=GetPixelChannels(paint_image);
}
if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
status=MagickFalse;
if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_OilPaintImage)
#endif
proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++,
linear_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
paint_view=DestroyCacheView(paint_view);
image_view=DestroyCacheView(image_view);
histograms=DestroyHistogramThreadSet(histograms);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
paint_image=DestroyImage(paint_image);
return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill argument.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
conform_fill,
conform_target,
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
assert(fill != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
ConformPixelInfo(image,fill,&conform_fill,exception);
ConformPixelInfo(image,target,&conform_target,exception);
/*
Make image color opaque.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
{
PixelTrait
traits;
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelRed(image,conform_fill.red,q);
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelGreen(image,conform_fill.green,q);
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlue(image,conform_fill.blue,q);
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlack(image,conform_fill.black,q);
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelAlpha(image,conform_fill.alpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_OpaquePaintImage)
#endif
proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransparentPaintImage)
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for the all the channels, TransparentPaintImage()
% is not suitable for the operations like chroma, where the tolerance for
% similarity of two color component (RGB) can be different. Thus we define
% this method to take two target pixels (one low and one high) and all the
% pixels of an image which are lying between these two pixels are made
% transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(high != (PixelInfo *) NULL);
assert(low != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
match;
PixelInfo
pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
GetPixelInfoPixel(image,q,&pixel);
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
MagickFalse;
if (match != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
nodal_residualbased_elimination_builder_and_solver.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER)
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolver);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
NodalResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
{
// KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolver") << "Using the standard builder and solver " << std::endl;
}
/** Destructor.
*/
~NodalResidualBasedEliminationBuilderAndSolver() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void SetMaterialPropertiesToFluid(
ModelPart::NodeIterator itNode,
double &density,
double &deviatoricCoeff,
double &volumetricCoeff,
double timeInterval,
double nodalVolume)
{
density = itNode->FastGetSolutionStepValue(DENSITY);
deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
if (volumetricCoeff > 0)
{
volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff);
volumetricCoeff *= bulkReduction;
}
}
void BuildFluidNodally(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
/* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
const double FourThirds = 4.0 / 3.0;
const double nTwoThirds = -2.0 / 3.0;
double theta = 0.5;
array_1d<double, 3> Acc(3, 0.0);
// array_1d<double,6> Sigma(6,0.0);
double pressure = 0;
double dNdXi = 0;
double dNdYi = 0;
double dNdZi = 0;
double dNdXj = 0;
double dNdYj = 0;
double dNdZj = 0;
unsigned int firstRow = 0;
unsigned int firstCol = 0;
double density = 0;
double deviatoricCoeff = 0;
double volumetricCoeff = 0;
/* #pragma omp parallel */
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
// const unsigned int neighSize = neighb_nodes.size()+1;
const unsigned int neighSize = nodalSFDneighboursId.size();
const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
if (neighSize > 1 && nodalVolume > 0)
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
if (LHS_Contribution.size1() != localSize)
LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!!
if (RHS_Contribution.size() != localSize)
RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!!
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
LHS_Contribution = ZeroMatrix(localSize, localSize);
RHS_Contribution = ZeroVector(localSize);
this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume);
firstRow = 0;
firstCol = 0;
if (dimension == 2)
{
//////////////////////////// LHS TERMS //////////////////////////////
LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION, 0);
RHS_Contribution[0] += -nodalVolume * density * Acc[0];
RHS_Contribution[1] += -nodalVolume * density * Acc[1];
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
// double posX= itNode->X();
// double posY= itNode->Y();
// double coeffX =(12.0-24.0*posY)*pow(posX,4);
// coeffX += (-24.0+48.0*posY)*pow(posX,3);
// coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2);
// coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX;
// coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3);
// double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3);
// coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2);
// coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX;
// coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4);
// RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX;
// RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY;
RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
//-------- INTERNAL FORCES TERM -------//
array_1d<double, 3> Sigma(3, 0.0);
Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);
Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]);
RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]);
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta;
firstRow += 2;
}
firstRow = 0;
firstCol += 2;
if (i < neighb_nodes.size())
{
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
}
}
/* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */
}
else if (dimension == 3)
{
//////////////////////////// LHS TERMS //////////////////////////////
LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval;
LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval;
LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval;
//////////////////////////// RHS TERMS //////////////////////////////
//-------- DYNAMIC FORCES TERM -------//
Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval -
itNode->FastGetSolutionStepValue(ACCELERATION, 0);
RHS_Contribution[0] += -nodalVolume * density * Acc[0];
RHS_Contribution[1] += -nodalVolume * density * Acc[1];
RHS_Contribution[2] += -nodalVolume * density * Acc[2];
//-------- EXTERNAL FORCES TERM -------//
array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION);
RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0];
RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1];
RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2];
//-------- INTERNAL FORCES TERM -------//
array_1d<double, 6> Sigma(6, 0.0);
Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta);
Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure;
Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure;
Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
for (unsigned int i = 0; i < neighSize; i++)
{
dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol];
dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1];
dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2];
RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]);
RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]);
RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]);
for (unsigned int j = 0; j < neighSize; j++)
{
dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow];
dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1];
dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2];
LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta;
LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta;
firstRow += 3;
}
firstRow = 0;
firstCol += 3;
if (i < neighb_nodes.size())
{
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
}
}
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
// }
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
// Prints informations about the current time
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b,
ModelPart &rModelPart)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//provide physical data as needed
if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
{
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints informations about the current time
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
Timer::Start("Build");
// boost::timer m_build_time;
BuildFluidNodally(pScheme, rModelPart, A, b);
// std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl;
Timer::Stop("Build");
// ApplyPointLoads(pScheme,rModelPart,b);
// Does nothing...dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
/* boost::timer m_solve_time; */
SystemSolveWithPhysics(A, Dx, b, rModelPart);
/* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType &pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = OpenMPUtils::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
for (int i = 0; i < static_cast<int>(nelements); ++i)
{
auto it_elem = pElements.begin() + i;
const IndexType this_thread_id = OpenMPUtils::ThisThread();
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
ConditionsArrayType &pConditions = rModelPart.Conditions();
const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
for (int i = 0; i < nconditions; ++i)
{
auto it_cond = pConditions.begin() + i;
const IndexType this_thread_id = OpenMPUtils::ThisThread();
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_cond, ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
//here we do a reduction in a tree so to have everything on thread 0
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5 * static_cast<double>(old_max));
}
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back((*it));
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an execption if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef _OPENMP
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
if (BaseType::GetCalculateReactionsFlag())
{
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl
<< "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart &rModelPart) override
{
// Set equation id for degrees of freedom
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed one are at the end (in opposite order).
//
// that means that if the EquationId is greater than "mEquationSystemSize"
// the pointed degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType &pA,
TSystemVectorPointerType &pDx,
TSystemVectorPointerType &pb,
ModelPart &rModelPart) override
{
KRATOS_TRY
// boost::timer m_contruct_matrix;
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType &A = *pA;
TSystemVectorType &Dx = *pDx;
TSystemVectorType &b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
if (BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
}
// std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart &rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void Assemble(
TSystemMatrixType &A,
TSystemVectorType &b,
const LocalSystemMatrixType &LHS_Contribution,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
,
std::vector<omp_lock_t> &lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
//**************************************************************************
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType &A,
ModelPart &rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
// Getting the array of the conditions
const int nconditions = static_cast<int>(rModelPart.Conditions().size());
ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();
const std::size_t equation_size = BaseType::mEquationSystemSize;
#ifdef USE_GOOGLE_HASH
std::vector<google::dense_hash_set<std::size_t>> indices(equation_size);
const std::size_t empty_key = 2 * equation_size + 10;
#else
std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#endif
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
#ifdef USE_GOOGLE_HASH
indices[iii].set_empty_key(empty_key);
#else
indices[iii].reserve(40);
#endif
}
Element::EquationIdVectorType EquationId;
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
if (EquationId.size() != localSize)
EquationId.resize(localSize, false);
unsigned int firstCol = 0;
const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);
EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
for (unsigned int i = 0; i < neighb_nodes.size(); i++)
{
firstCol += dimension;
EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
if (dimension == 3)
{
EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
}
}
for (std::size_t i = 0; i < EquationId.size(); i++)
{
if (EquationId[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[EquationId[i]]);
#endif
auto &row_indices = indices[EquationId[i]];
for (auto it = EquationId.begin(); it != EquationId.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
}
}
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
for (int iii = 0; iii < nconditions; iii++)
{
typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
pScheme->EquationId(*i_condition, ids, CurrentProcessInfo);
for (std::size_t i = 0; i < ids.size(); i++)
{
if (ids[i] < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&mlock_array[ids[i]]);
#endif
auto &row_indices = indices[ids[i]];
for (auto it = ids.begin(); it != ids.end(); it++)
{
if (*it < BaseType::mEquationSystemSize)
row_indices.insert(*it);
}
#ifdef _OPENMP
omp_unset_lock(&mlock_array[ids[i]]);
#endif
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
void AssembleLHS(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector<omp_lock_t> mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate)
{
i++;
}
if (i == endit)
{
v.push_back(candidate);
}
}
void AssembleRHS(
TSystemVectorType &b,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
int j_global = EquationId[j_local];
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
GB_binop__bset_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bset_uint8
// A.*B function (eWiseMult): GB_AemultB__bset_uint8
// A*D function (colscale): (none)
// D*A function (rowscale): (node)
// C+=B function (dense accum): GB_Cdense_accumB__bset_uint8
// C+=b function (dense accum): GB_Cdense_accumb__bset_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_uint8
// C=scalar+B GB_bind1st__bset_uint8
// C=scalar+B' GB_bind1st_tran__bset_uint8
// C=A+scalar GB_bind2nd__bset_uint8
// C=A'+scalar GB_bind2nd_tran__bset_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_BITSET (aij, bij, uint8_t, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITSET (x, y, uint8_t, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT8 || GxB_NO_BSET_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bset_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bset_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bset_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (node)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bset_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bset_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bset_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = GB_BITSET (x, bij, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bset_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = GB_BITSET (aij, y, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, uint8_t, 8) ; \
}
GrB_Info GB_bind1st_tran__bset_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (aij, y, uint8_t, 8) ; \
}
GrB_Info GB_bind2nd_tran__bset_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__isgt_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int8)
// A*D function (colscale): GB (_AxD__isgt_int8)
// D*A function (rowscale): GB (_DxB__isgt_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int8)
// C=scalar+B GB (_bind1st__isgt_int8)
// C=scalar+B' GB (_bind1st_tran__isgt_int8)
// C=A+scalar GB (_bind2nd__isgt_int8)
// C=A'+scalar GB (_bind2nd_tran__isgt_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_INT8 || GxB_NO_ISGT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isgt_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/identify.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/magick.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/segment.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
target[3],
zero;
RectangleInfo
bounds;
register const Quantum
*r;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetPixelInfo(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
r=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (r == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
GetPixelInfoPixel(image,r,&target[0]);
GetPixelInfo(image,&target[1]);
r=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (r != (const Quantum *) NULL)
GetPixelInfoPixel(image,r,&target[1]);
GetPixelInfo(image,&target[2]);
r=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (r != (const Quantum *) NULL)
GetPixelInfoPixel(image,r,&target[2]);
status=MagickTrue;
GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
RectangleInfo
bounding_box;
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,p,&pixel);
if ((x < bounding_box.x) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) && (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDepth() returns the depth of a particular image channel.
%
% The format of the GetImageDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) &&
(image->alpha_trait == UndefinedPixelTrait))
{
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((atDepth != MagickFalse) &&
(GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) &&
(GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
size_t
*depth_map;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(channel == IndexPixelChannel) ||
(channel == ReadMaskPixelChannel) ||
(channel == MetaPixelChannel))
continue;
if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
/*
Compute pixel depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
continue;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) ||
(channel == ReadMaskPixelChannel))
continue;
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
QuantumAny
range;
range=GetQuantumRange(current_depth[id]);
if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
break;
current_depth[id]++;
}
}
p+=GetPixelChannels(image);
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->colorspace == CMYKColorspace)
{
if (image->alpha_trait == UndefinedPixelTrait)
return(ColorSeparationType);
return(ColorSeparationAlphaType);
}
if (IsImageMonochrome(image) != MagickFalse)
return(BilevelType);
if (IsImageGray(image) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(GrayscaleAlphaType);
return(GrayscaleType);
}
if (IsPaletteImage(image) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(PaletteAlphaType);
return(PaletteType);
}
if (image->alpha_trait != UndefinedPixelTrait)
return(TrueColorAlphaType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level is the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register const Quantum
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleAlphaType))
return(image->type);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(UndefinedType);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelGray(image,p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) &&
(IsPixelMonochrome(image,p) == MagickFalse))
type=GrayscaleType;
p+=GetPixelChannels(image);
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
type=GrayscaleAlphaType;
return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register ssize_t
x;
register const Quantum
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelMonochrome(image,p) == MagickFalse)
{
type=UndefinedType;
break;
}
p+=GetPixelChannels(image);
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->alpha_trait == UndefinedPixelTrait)
return(ColorSeparationType);
return(ColorSeparationAlphaType);
}
if (IdentifyImageMonochrome(image,exception) != MagickFalse)
return(BilevelType);
if (IdentifyImageGray(image,exception) != UndefinedType)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(GrayscaleAlphaType);
return(GrayscaleType);
}
if (IdentifyPaletteImage(image,exception) != MagickFalse)
{
if (image->alpha_trait != UndefinedPixelTrait)
return(PaletteAlphaType);
return(PaletteType);
}
if (image->alpha_trait != UndefinedPixelTrait)
return(TrueColorAlphaType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
% MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleAlphaType))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
% MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O p a q u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Will return true immediatally is alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
% MagickBooleanType IsImageOpaque(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const Quantum
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->alpha_trait == UndefinedPixelTrait)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelAlpha(image,p) != OpaqueAlpha)
break;
p+=GetPixelChannels(image);
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].red),range),range);
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].green),range),range);
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].blue),range),range);
if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].alpha),range),range);
}
}
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
if ((1UL*QuantumRange) <= MaxMap)
{
Quantum
*depth_map;
register ssize_t
i;
/*
Scale pixels to desired (optimized with depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(channel == IndexPixelChannel) ||
(channel == ReadMaskPixelChannel))
continue;
q[i]=depth_map[ScaleQuantumToMap(q[i])];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) || (channel == IndexPixelChannel) ||
(channel == ReadMaskPixelChannel))
continue;
q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
q[i]),range),range);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
ExceptionInfo *exception)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
if (SetImageMonochrome(image,exception) == MagickFalse)
{
status=TransformImageColorspace(image,GRAYColorspace,exception);
(void) NormalizeImage(image,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleType:
{
if (SetImageGray(image,exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case GrayscaleAlphaType:
{
if (SetImageGray(image,exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case PaletteType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->alpha_trait=UndefinedPixelTrait;
break;
}
case PaletteBilevelAlphaType:
{
ChannelType
channel_mask;
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
channel_mask=SetImageChannelMask(image,AlphaChannel);
(void) BilevelImage(image,(double) QuantumRange/2.0,exception);
(void) SetImageChannelMask(image,channel_mask);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteAlphaType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case TrueColorAlphaType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case ColorSeparationType:
{
if (image->colorspace != CMYKColorspace)
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
status=TransformImageColorspace(image,CMYKColorspace,exception);
}
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
image->alpha_trait=UndefinedPixelTrait;
break;
}
case ColorSeparationAlphaType:
{
if (image->colorspace != CMYKColorspace)
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace,exception);
status=TransformImageColorspace(image,CMYKColorspace,exception);
}
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass,exception);
if (image->alpha_trait == UndefinedPixelTrait)
status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
if (status == MagickFalse)
return(status);
image->type=type;
return(MagickTrue);
}
|
coordinate_common.h | /*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "../common/random.h"
namespace xgboost {
namespace linear {
/**
* \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
* number of training instances.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
* \param w The weight.
* \param reg_alpha Unnormalised L1 penalty.
* \param reg_lambda Unnormalised L2 penalty.
*
* \return The weight update.
*/
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
double reg_alpha, double reg_lambda) {
if (sum_hess < 1e-5f) return 0.0f;
const double sum_grad_l2 = sum_grad + reg_lambda * w;
const double sum_hess_l2 = sum_hess + reg_lambda;
const double tmp = w - sum_grad_l2 / sum_hess_l2;
if (tmp >= 0) {
return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w);
} else {
return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w);
}
}
/**
* \brief Calculate update to bias.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
*
* \return The weight update.
*/
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
return -sum_grad / sum_hess;
}
/**
* \brief Get the gradient with respect to a single feature.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
for (const auto &batch : p_fmat->GetColumnBatches()) {
auto col = batch[fidx];
const auto ndata = static_cast<bst_omp_uint>(col.size());
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
sum_grad += p.GetGrad() * v;
sum_hess += p.GetHess() * v * v;
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
for (const auto &batch : p_fmat->GetColumnBatches()) {
auto col = batch[fidx];
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
sum_grad += p.GetGrad() * v;
sum_hess += p.GetHess() * v * v;
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to the bias. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for the bias.
*/
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
for (bst_omp_uint i = 0; i < ndata; ++i) {
auto &p = gpair[i * num_group + group_idx];
if (p.GetHess() >= 0.0f) {
sum_grad += p.GetGrad();
sum_hess += p.GetHess();
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Updates the gradient vector with respect to a change in weight.
*
* \param fidx The feature index.
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dw The change in weight.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group,
float dw, std::vector<GradientPair> *in_gpair,
DMatrix *p_fmat) {
if (dw == 0.0f) return;
for (const auto &batch : p_fmat->GetColumnBatches()) {
auto col = batch[fidx];
// update grad value
const auto num_row = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < num_row; ++j) {
GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
}
}
}
/**
* \brief Updates the gradient vector based on a change in the bias.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dbias The change in bias.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
std::vector<GradientPair> *in_gpair,
DMatrix *p_fmat) {
if (dbias == 0.0f) return;
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
GradientPair &g = (*in_gpair)[i * num_group + group_idx];
if (g.GetHess() < 0.0f) continue;
g += GradientPair(g.GetHess() * dbias, 0);
}
}
/**
* \brief Abstract class for stateful feature selection or ordering
* in coordinate descent algorithms.
*/
class FeatureSelector {
public:
/*! \brief factory method */
static FeatureSelector *Create(int choice);
/*! \brief virtual destructor */
virtual ~FeatureSelector() = default;
/**
* \brief Setting up the selector state prior to looping through features.
*
* \param model The model.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
* \param param A parameter with algorithm-dependent use.
*/
virtual void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat,
float alpha, float lambda, int param) {}
/**
* \brief Select next coordinate to update.
*
* \param iteration The iteration in a loop through features
* \param model The model.
* \param group_idx Zero-based index of the group.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
*
* \return The index of the selected feature. -1 indicates none selected.
*/
virtual int NextFeature(int iteration,
const gbm::GBLinearModel &model,
int group_idx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) = 0;
};
/**
* \brief Deterministic selection by cycling through features one at a time.
*/
class CyclicFeatureSelector : public FeatureSelector {
public:
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
return iteration % model.param.num_feature;
}
};
/**
* \brief Similar to Cyclic but with random feature shuffling prior to each update.
* \note Its randomness is controllable by setting a random seed.
*/
class ShuffleFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
if (feat_index_.size() == 0) {
feat_index_.resize(model.param.num_feature);
std::iota(feat_index_.begin(), feat_index_.end(), 0);
}
std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
return feat_index_[iteration % model.param.num_feature];
}
protected:
std::vector<bst_uint> feat_index_;
};
/**
* \brief A random (with replacement) coordinate selector.
* \note Its randomness is controllable by setting a random seed.
*/
class RandomFeatureSelector : public FeatureSelector {
public:
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
return common::GlobalRandom()() % model.param.num_feature;
}
};
/**
* \brief Select coordinate with the greatest gradient magnitude.
* \note It has O(num_feature^2) complexity. It is fully deterministic.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup(). That would reduce the complexity to
* O(num_feature*top_k).
*/
class GreedyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
const bst_uint ngroup = model.param.num_output_group;
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
if (counter_.size() == 0) {
counter_.resize(ngroup);
gpair_sums_.resize(model.param.num_feature * ngroup);
}
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
counter_[gid] = 0u;
}
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-K or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
const int ngroup = model.param.num_output_group;
const bst_omp_uint nfeat = model.param.num_feature;
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
for (const auto &batch : p_fmat->GetColumnBatches()) {
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const auto col = batch[i];
const bst_uint ndata = col.size();
auto &sums = gpair_sums_[group_idx * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + group_idx];
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
// Find a feature with the largest magnitude of weight change
int best_fidx = 0;
double best_weight_update = 0.0f;
for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
auto &s = gpair_sums_[group_idx * nfeat + fidx];
float dw = std::abs(static_cast<bst_float>(
CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
if (dw > best_weight_update) {
best_weight_update = dw;
best_fidx = fidx;
}
}
return best_fidx;
}
protected:
bst_uint top_k_;
std::vector<bst_uint> counter_;
std::vector<std::pair<double, double>> gpair_sums_;
};
/**
* \brief Thrifty, approximately-greedy feature selector.
*
* \note Prior to cyclic updates, reorders features in descending magnitude of
* their univariate weight changes. This operation is multithreaded and is a
* linear complexity approximation of the quadratic greedy selection.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup().
*/
class ThriftyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
const bst_uint ngroup = model.param.num_output_group;
const bst_omp_uint nfeat = model.param.num_feature;
if (deltaw_.size() == 0) {
deltaw_.resize(nfeat * ngroup);
sorted_idx_.resize(nfeat * ngroup);
counter_.resize(ngroup);
gpair_sums_.resize(nfeat * ngroup);
}
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
for (const auto &batch : p_fmat->GetColumnBatches()) {
// column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const auto col = batch[i];
const bst_uint ndata = col.size();
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
auto &sums = gpair_sums_[gid * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + gid];
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
}
// rank by descending weight magnitude within the groups
std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
bst_float *pdeltaw = &deltaw_[0];
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
// Calculate univariate weight changes
for (bst_omp_uint i = 0; i < nfeat; ++i) {
auto ii = gid * nfeat + i;
auto &s = gpair_sums_[ii];
deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
s.first, s.second, model[i][gid], alpha, lambda));
}
// sort in descending order of deltaw abs values
auto start = sorted_idx_.begin() + gid * nfeat;
std::sort(start, start + nfeat,
[pdeltaw](size_t i, size_t j) {
return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
});
counter_[gid] = 0u;
}
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-N or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
// note that sorted_idx stores the "long" indices
const size_t grp_offset = group_idx * model.param.num_feature;
return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
}
protected:
bst_uint top_k_;
std::vector<bst_float> deltaw_;
std::vector<size_t> sorted_idx_;
std::vector<bst_uint> counter_;
std::vector<std::pair<double, double>> gpair_sums_;
};
/**
* \brief A set of available FeatureSelector's
*/
enum FeatureSelectorEnum {
kCyclic = 0,
kShuffle,
kThrifty,
kGreedy,
kRandom
};
inline FeatureSelector *FeatureSelector::Create(int choice) {
switch (choice) {
case kCyclic:
return new CyclicFeatureSelector();
case kShuffle:
return new ShuffleFeatureSelector();
case kThrifty:
return new ThriftyFeatureSelector();
case kGreedy:
return new GreedyFeatureSelector();
case kRandom:
return new RandomFeatureSelector();
default:
LOG(FATAL) << "unknown coordinate selector: " << choice;
}
return nullptr;
}
} // namespace linear
} // namespace xgboost
|
omp_matvec.c | /******************************************************************************
* OpenMP Example - Matrix-vector multiplication - C/C++ Version
* FILE: omp_matvec.c
* DESCRIPTION:
* This example multiplies all row i elements of matrix A with vector
* element b(i) and stores the summed products in vector c(i). A total is
* maintained for the entire matrix. Performed by using the OpenMP loop
* work-sharing construct. The update of the shared global total is
* serialized by using the OpenMP critical directive.
* SOURCE: Blaise Barney 5/99
* LAST REVISED:
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#define SIZE 10
int
main ()
{
float A[SIZE][SIZE], b[SIZE], c[SIZE], total;
int i, j, tid;
/* Initializations */
total = 0.0;
for (i=0; i < SIZE; i++)
{
for (j=0; j < SIZE; j++)
A[i][j] = (j+1) * 1.0;
b[i] = 1.0 * (i+1);
c[i] = 0.0;
}
printf("\nStarting values of matrix A and vector b:\n");
for (i=0; i < SIZE; i++)
{
printf(" A[%d]= ",i);
for (j=0; j < SIZE; j++)
printf("%.1f ",A[i][j]);
printf(" b[%d]= %.1f\n",i,b[i]);
}
printf("\nResults by thread/row:\n");
/* Create a team of threads and scope variables */
#pragma omp parallel shared(A,b,c,total) private(tid,i)
{
tid = omp_get_thread_num();
/* Loop work-sharing construct - distribute rows of matrix */
#pragma omp for private(j)
for (i=0; i < SIZE; i++)
{
for (j=0; j < SIZE; j++)
c[i] += (A[i][j] * b[i]);
/* Update and display of running total must be serialized */
#pragma omp critical
{
total = total + c[i];
printf(" thread %d did row %d\t c[%d]=%.2f\t",tid,i,i,c[i]);
printf("Running total= %.2f\n",total);
}
} /* end of parallel i loop */
} /* end of parallel construct */
printf("\nMatrix-vector total - sum of all c[] = %.2f\n\n",total);
return 0;
}
|
arrays.c | /**
* module with tools for manipulating arrays
* Julien Lesgourgues, 18.04.2010
*/
#include "arrays.h"
/**
* Called by thermodynamics_init(); perturb_sources().
*/
int array_derive(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_dydx,
ErrorMsg errmsg) {
int i;
double dx1,dx2,dy1,dy2,weight1,weight2;
class_test((index_dydx == index_x) || (index_dydx == index_y),
errmsg,
"output column %d must differ from input columns %d and %d",index_dydx,index_x,index_y);
dx2=array[1*n_columns+index_x]-array[0*n_columns+index_x];
dy2=array[1*n_columns+index_y]-array[0*n_columns+index_y];
for (i=1; i<n_lines-1; i++) {
dx1 = dx2;
dy1 = dy2;
dx2 = array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x];
dy2 = array[(i+1)*n_columns+index_y]-array[i*n_columns+index_y];
class_test((dx1 == 0) || (dx2 == 0),
errmsg,
"stop to avoid division by zero");
weight1 = dx2*dx2;
weight2 = dx1*dx1;
array[i*n_columns+index_dydx] = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);
if (i == 1)
array[(i-1)*n_columns+index_dydx] = 2.*dy1/dx1 - array[i*n_columns+index_dydx];
if (i == n_lines-2)
array[(i+1)*n_columns+index_dydx] = 2.*dy2/dx2 - array[i*n_columns+index_dydx];
}
return _SUCCESS_;
}
int array_derive_spline(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
int index_y,
int index_dydx,
ErrorMsg errmsg) {
int i;
double h;
class_test(index_dydx == index_y,
errmsg,
"Output column %d must differ from input columns %d",
index_dydx,
index_y);
class_test(n_lines<2,
errmsg,
"no possible derivation with less than two lines");
for (i=0; i<n_lines-1; i++) {
h = x_array[i+1] - x_array[i];
if (h == 0) {
sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
array[i*n_columns+index_dydx] =
(array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
- h / 6. * (array_splined[(i+1)*n_columns+index_y] + 2. * array_splined[i*n_columns+index_y]);
}
h = x_array[n_lines-1] - x_array[n_lines-2];
array[(n_lines-1)*n_columns+index_dydx] =
(array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
+ h / 6. * (2. * array_splined[(n_lines-1)*n_columns+index_y] + array_splined[(n_lines-2)*n_columns+index_y]);
return _SUCCESS_;
}
int array_derive_spline_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddy,
int index_dy,
ErrorMsg errmsg) {
int i;
double h;
class_test(index_ddy == index_y,
errmsg,
"Output column %d must differ from input columns %d",
index_ddy,
index_y);
class_test(index_ddy == index_dy,
errmsg,
"Output column %d must differ from input columns %d",
index_ddy,
index_dy);
class_test(n_lines<2,
errmsg,
"no possible derivation with less than two lines");
for (i=0; i<n_lines-1; i++) {
h = x_array[i+1] - x_array[i];
if (h == 0) {
sprintf(errmsg,"%s(L:%d) h=0, stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
array[i*n_columns+index_dy] =
(array[(i+1)*n_columns+index_y] - array[i*n_columns+index_y])/h
- h / 6. * (array[(i+1)*n_columns+index_ddy] + 2. * array[i*n_columns+index_ddy]);
}
h = x_array[n_lines-1] - x_array[n_lines-2];
array[(n_lines-1)*n_columns+index_dy] =
(array[(n_lines-1)*n_columns+index_y] - array[(n_lines-2)*n_columns+index_y])/h
+ h / 6. * (2. * array[(n_lines-1)*n_columns+index_ddy] + array[(n_lines-2)*n_columns+index_ddy]);
return _SUCCESS_;
}
int array_derive1_order2_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_dy,
ErrorMsg errmsg) {
int i=1;
double dxp,dxm,dyp,dym;
if (n_lines < 2) {
sprintf(errmsg,"%s(L:%d) routine called with n_lines=%d, should be at least 2",__func__,__LINE__,n_lines);
return _FAILURE_;
}
dxp = x_array[2] - x_array[1];
dxm = x_array[0] - x_array[1];
dyp = *(array+2*n_columns+index_y) - *(array+1*n_columns+index_y);
dym = *(array+0*n_columns+index_y) - *(array+1*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+1*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
*(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
- (x_array[1] - x_array[0]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
for (i=2; i<n_lines-1; i++) {
dxp = x_array[i+1] - x_array[i];
dxm = x_array[i-1] - x_array[i];
dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
}
*(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
+ (x_array[n_lines-1] - x_array[n_lines-2]) * 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
return _SUCCESS_;
}
int array_derive2_order2_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_dy,
int index_ddy,
ErrorMsg errmsg) {
int i;
double dxp,dxm,dyp,dym;
for (i=1; i<n_lines-1; i++) {
dxp = x_array[i+1] - x_array[i];
dxm = x_array[i-1] - x_array[i];
dyp = *(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y);
dym = *(array+(i-1)*n_columns+index_y) - *(array+i*n_columns+index_y);
if ((dxp*dxm*(dxm-dxp)) == 0.) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dy) = (dyp*dxm*dxm-dym*dxp*dxp)/(dxp*dxm*(dxm-dxp));
*(array+i*n_columns+index_ddy) = 2.*(dyp*dxm-dym*dxp)/(dxp*dxm*(dxp-dxm));
}
*(array+0*n_columns+index_dy) = *(array+1*n_columns+index_dy)
- (x_array[1] - x_array[0]) * *(array+1*n_columns+index_ddy);
*(array+0*n_columns+index_ddy) = *(array+1*n_columns+index_ddy);
*(array+(n_lines-1)*n_columns+index_dy) = *(array+(n_lines-2)*n_columns+index_dy)
+ (x_array[n_lines-1] - x_array[n_lines-2]) * *(array+(n_lines-2)*n_columns+index_ddy);
*(array+(n_lines-1)*n_columns+index_ddy) = *(array+(n_lines-2)*n_columns+index_ddy);
return _SUCCESS_;
}
int array_integrate_spline_table_line_to_line(
double * x_array,
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddy,
int index_inty,
ErrorMsg errmsg) {
int i;
double h;
*(array+0*n_columns+index_inty) = 0.;
for (i=0; i < n_lines-1; i++) {
h = (x_array[i+1]-x_array[i]);
*(array+(i+1)*n_columns+index_inty) = *(array+i*n_columns+index_inty) +
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.+
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
}
return _SUCCESS_;
}
/**
* Not called.
*/
int array_derive_two(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_dydx,
int index_ddydxdx,
ErrorMsg errmsg) {
int i;
double dx1,dx2,dy1,dy2,weight1,weight2;
if ((index_dydx == index_x) || (index_dydx == index_y)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_dydx,index_x,index_y);
return _FAILURE_;
}
dx2=*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x);
dy2=*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y);
for (i=1; i<n_lines-1; i++) {
dx1 = dx2;
dy1 = dy2;
dx2 = *(array+(i+1)*n_columns+index_x)-*(array+i*n_columns+index_x);
dy2 = *(array+(i+1)*n_columns+index_y)-*(array+i*n_columns+index_y);
weight1 = dx2*dx2;
weight2 = dx1*dx1;
if ((dx1 == 0.) && (dx2 == 0.)) {
sprintf(errmsg,"%s(L:%d) stop to avoid division by zero",__func__,__LINE__);
return _FAILURE_;
}
*(array+i*n_columns+index_dydx) = (weight1*dy1+weight2*dy2) / (weight1*dx1+weight2*dx2);
*(array+i*n_columns+index_ddydxdx) = (dx2*dy1-dx1*dy2) / (weight1*dx1+weight2*dx2);
if (i == 1) {
*(array+(i-1)*n_columns+index_dydx) = 2.*dy1/dx1 - *(array+i*n_columns+index_dydx);
*(array+(i-1)*n_columns+index_ddydxdx) = *(array+i*n_columns+index_ddydxdx);
}
if (i == n_lines-2) {
*(array+(i+1)*n_columns+index_dydx) = 2.*dy2/dx2 - *(array+i*n_columns+index_dydx);
*(array+(i+1)*n_columns+index_dydx) = *(array+i*n_columns+index_ddydxdx);
}
}
return _SUCCESS_;
}
int array_spline(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddydx2,
short spline_mode,
ErrorMsg errmsg) {
int i,k;
double p,qn,sig,un;
double * u;
double dy_first;
double dy_last;
if (n_lines < 3) {
sprintf(errmsg,"%s(L:%d) n_lines=%d, while routine needs n_lines >= 3",__func__,__LINE__,n_lines);
return _FAILURE_;
}
u = malloc((n_lines-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (spline_mode == _SPLINE_NATURAL_) {
*(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
((*(array+2*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+1*n_columns+index_x)-*(array+0*n_columns+index_x))*
(*(array+2*n_columns+index_x)-*(array+1*n_columns+index_x)));
*(array+0*n_columns+index_ddydx2) = -0.5;
u[0] =
(3./(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x)))*
((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
(*(array+1*n_columns+index_x) - *(array+0*n_columns+index_x))
-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (i=1; i < n_lines-1; i++) {
sig = (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
/ (*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;
*(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;
u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
/ (*(array+(i+1)*n_columns+index_x) - *(array+i*n_columns+index_x))
- (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
/ (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
u[i]= (6.0 * u[i] /
(*(array+(i+1)*n_columns+index_x) - *(array+(i-1)*n_columns+index_x))
- sig * u[i-1]) / p;
}
if (spline_mode == _SPLINE_NATURAL_) {
qn=0.;
un=0.;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
((*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-2)*n_columns+index_x)-*(array+(n_lines-1)*n_columns+index_x))*
(*(array+(n_lines-3)*n_columns+index_x)-*(array+(n_lines-2)*n_columns+index_x)));
qn=0.5;
un =
(3./(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)))*
(dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
(*(array+(n_lines-1)*n_columns+index_x) - *(array+(n_lines-2)*n_columns+index_x)));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
*(array+(n_lines-1)*n_columns+index_ddydx2) =
(un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);
for (k=n_lines-2; k>=0; k--)
*(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
*(array+(k+1)*n_columns+index_ddydx2) + u[k];
free(u);
return _SUCCESS_;
}
int array_spline_table_line_to_line(
double * x, /* vector of size x_size */
int n_lines,
double * array,
int n_columns,
int index_y,
int index_ddydx2,
short spline_mode,
ErrorMsg errmsg) {
int i,k;
double p,qn,sig,un;
double * u;
double dy_first;
double dy_last;
u = malloc((n_lines-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (spline_mode == _SPLINE_NATURAL_) {
*(array+0*n_columns+index_ddydx2) = u[0] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(*(array+1*n_columns+index_y)-*(array+0*n_columns+index_y))-
(x[1]-x[0])*(x[1]-x[0])*
(*(array+2*n_columns+index_y)-*(array+0*n_columns+index_y)))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
*(array+0*n_columns+index_ddydx2) = -0.5;
u[0] =
(3./(x[1] - x[0]))*
((*(array+1*n_columns+index_y) - *(array+0*n_columns+index_y))/
(x[1] - x[0])-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (i=1; i < n_lines-1; i++) {
sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]);
p = sig * *(array+(i-1)*n_columns+index_ddydx2) + 2.0;
*(array+i*n_columns+index_ddydx2) = (sig-1.0)/p;
u[i] = (*(array+(i+1)*n_columns+index_y) - *(array+i*n_columns+index_y))
/ (x[i+1] - x[i])
- (*(array+i*n_columns+index_y) - *(array+(i-1)*n_columns+index_y))
/ (x[i] - x[i-1]);
u[i]= (6.0 * u[i] /
(x[i+1] - x[i-1])
- sig * u[i-1]) / p;
}
if (spline_mode == _SPLINE_NATURAL_) {
qn=0.;
un=0.;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((x[n_lines-3]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-1])*
(*(array+(n_lines-2)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y))-
(x[n_lines-2]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*
(*(array+(n_lines-3)*n_columns+index_y)-*(array+(n_lines-1)*n_columns+index_y)))/
((x[n_lines-3]-x[n_lines-1])*(x[n_lines-2]-x[n_lines-1])*(x[n_lines-3]-x[n_lines-2]));
qn=0.5;
un =
(3./(x[n_lines-1] - x[n_lines-2]))*
(dy_last-(*(array+(n_lines-1)*n_columns+index_y) - *(array+(n_lines-2)*n_columns+index_y))/
(x[n_lines-1] - x[n_lines-2]));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
*(array+(n_lines-1)*n_columns+index_ddydx2) =
(un-qn*u[n_lines-2])/(qn* *(array+(n_lines-2)*n_columns+index_ddydx2)+1.0);
for (k=n_lines-2; k>=0; k--)
*(array+k*n_columns+index_ddydx2) = *(array+k*n_columns+index_ddydx2) *
*(array+(k+1)*n_columns+index_ddydx2) + u[k];
free(u);
return _SUCCESS_;
}
int array_spline_table_lines(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_x*y_size+index_y] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[1*y_size+index_y]-y_array[0*y_size+index_y])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[2*y_size+index_y]-y_array[0*y_size+index_y]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_x*y_size+index_y] = -0.5;
u[index_x*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[1*y_size+index_y]-y_array[0*y_size+index_y])/
(x[1] - x[0])-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddy_array[(index_x-1)*y_size+index_y] + 2.0;
ddy_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[(index_x+1)*y_size+index_y] - y_array[index_x*y_size+index_y])
/ (x[index_x+1] - x[index_x])
- (y_array[index_x*y_size+index_y] - y_array[(index_x-1)*y_size+index_y])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[(x_size-2)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[(x_size-3)*y_size+index_y]-y_array[(x_size-1)*y_size+index_y]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[(x_size-1)*y_size+index_y] - y_array[(x_size-2)*y_size+index_y])/
(x[x_size-1] - x[x_size-2]));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_x*y_size+index_y] = ddy_array[index_x*y_size+index_y] *
ddy_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
}
}
free(qn);
free(un);
free(p);
free(u);
return _SUCCESS_;
}
int array_logspline_table_lines(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_x*y_size+index_y] */
int y_size,
double * ddlny_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] = u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_first =
((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
(log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))-
(log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
(log(y_array[2*y_size+index_y])-log(y_array[0*y_size+index_y])))/
((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));
ddlny_array[index_x*y_size+index_y] = -0.5;
u[index_x*y_size+index_y] =
(3./(log(x[1]) - log(x[0])))*
((log(y_array[1*y_size+index_y])-log(y_array[0*y_size+index_y]))/
(log(x[1]) - log(x[0]))-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddlny_array[(index_x-1)*y_size+index_y] + 2.0;
ddlny_array[index_x*y_size+index_y] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(log(y_array[(index_x+1)*y_size+index_y]) - log(y_array[index_x*y_size+index_y]))
/ (log(x[index_x+1]) - log(x[index_x]))
- (log(y_array[index_x*y_size+index_y]) - log(y_array[(index_x-1)*y_size+index_y]))
/ (log(x[index_x]) - log(x[index_x-1]));
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(log(x[index_x+1]) - log(x[index_x-1]))
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_last =
((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-1]))*
(log(y_array[(x_size-2)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y]))-
(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*
(log(y_array[(x_size-3)*y_size+index_y])-log(y_array[(x_size-1)*y_size+index_y])))/
((log(x[x_size-3])-log(x[x_size-1]))*(log(x[x_size-2])-log(x[x_size-1]))*(log(x[x_size-3])-log(x[x_size-2])));
qn[index_y]=0.5;
un[index_y]=
(3./(log(x[x_size-1]) - log(x[x_size-2])))*
(dy_last-(log(y_array[(x_size-1)*y_size+index_y]) - log(y_array[(x_size-2)*y_size+index_y]))/
(log(x[x_size-1]) - log(x[x_size-2])));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddlny_array[(index_x-1)*y_size+index_y] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddlny_array[index_x*y_size+index_y] = ddlny_array[index_x*y_size+index_y] *
ddlny_array[(index_x+1)*y_size+index_y] + u[index_x*y_size+index_y];
}
}
free(qn);
free(un);
free(p);
free(u);
return _SUCCESS_;
}
int array_spline_table_columns(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_y*x_size+index_x] = 0.0;
u[index_x*y_size+index_y] = 0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
class_test(x[2]-x[0]==0.,
errmsg,
"x[2]=%g, x[0]=%g, stop to avoid seg fault",x[2],x[0]);
class_test(x[1]-x[0]==0.,
errmsg,
"x[1]=%g, x[0]=%g, stop to avoid seg fault",x[1],x[0]);
class_test(x[2]-x[1]==0.,
errmsg,
"x[2]=%g, x[1]=%g, stop to avoid seg fault",x[2],x[1]);
for (index_y=0; index_y < y_size; index_y++) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_y*x_size+index_x] = -0.5;
u[index_x*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
(x[1] - x[0])-dy_first);
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
for (index_y=0; index_y < y_size; index_y++) {
p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;
ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
/ (x[index_x+1] - x[index_x])
- (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
}
if (spline_mode == _SPLINE_NATURAL_) {
for (index_y=0; index_y < y_size; index_y++) {
qn[index_y]=un[index_y]=0.0;
}
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
for (index_y=0; index_y < y_size; index_y++) {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
(x[x_size-1] - x[x_size-2]));
}
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
index_x=x_size-1;
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_y*x_size+index_x] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
}
for (index_x=x_size-2; index_x >= 0; index_x--) {
for (index_y=0; index_y < y_size; index_y++) {
ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
}
}
free(qn);
free(p);
free(u);
free(un);
return _SUCCESS_;
}
int array_spline_table_columns2(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double * p;
double * qn;
double * un;
double * u;
double sig;
int index_x;
int index_y;
double dy_first;
double dy_last;
u = malloc((x_size-1) * y_size * sizeof(double));
p = malloc(y_size * sizeof(double));
qn = malloc(y_size * sizeof(double));
un = malloc(y_size * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (p == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate p",__func__,__LINE__);
return _FAILURE_;
}
if (qn == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate qn",__func__,__LINE__);
return _FAILURE_;
}
if (un == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate un",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
#pragma omp parallel \
shared(x,x_size,y_array,y_size,ddy_array,spline_mode,p,qn,un,u) \
private(index_y,index_x,sig,dy_first,dy_last)
{
#pragma omp for schedule (dynamic)
for (index_y=0; index_y < y_size; index_y++) {
if (spline_mode == _SPLINE_NATURAL_) {
ddy_array[index_y*x_size+0] = 0.0;
u[0*y_size+index_y] = 0.0;
}
else {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_y*x_size+0] = -0.5;
u[0*y_size+index_y] =
(3./(x[1] - x[0]))*
((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
(x[1] - x[0])-dy_first);
}
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
p[index_y] = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;
ddy_array[index_y*x_size+index_x] = (sig-1.0)/p[index_y];
u[index_x*y_size+index_y] =
(y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
/ (x[index_x+1] - x[index_x])
- (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
/ (x[index_x] - x[index_x-1]);
u[index_x*y_size+index_y] = (6.0 * u[index_x*y_size+index_y] /
(x[index_x+1] - x[index_x-1])
- sig * u[(index_x-1)*y_size+index_y]) / p[index_y];
}
if (spline_mode == _SPLINE_NATURAL_) {
qn[index_y]=un[index_y]=0.0;
}
else {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn[index_y]=0.5;
un[index_y]=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
(x[x_size-1] - x[x_size-2]));
}
index_x=x_size-1;
ddy_array[index_y*x_size+index_x] =
(un[index_y] - qn[index_y] * u[(index_x-1)*y_size+index_y]) /
(qn[index_y] * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
for (index_x=x_size-2; index_x >= 0; index_x--) {
ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
ddy_array[index_y*x_size+(index_x+1)] + u[index_x*y_size+index_y];
}
}
}
free(qn);
free(p);
free(u);
free(un);
return _SUCCESS_;
}
int array_spline_table_one_column(
double * x, /* vector of size x_size */
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double p;
double qn;
double un;
double * u;
double sig;
int index_x;
double dy_first;
double dy_last;
u = malloc((x_size-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
/************************************************/
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
ddy_array[index_y*x_size+index_x] = 0.0;
u[index_x] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((x[2]-x[0])*(x[2]-x[0])*
(y_array[index_y*x_size+1]-y_array[index_y*x_size+0])-
(x[1]-x[0])*(x[1]-x[0])*
(y_array[index_y*x_size+2]-y_array[index_y*x_size+0]))/
((x[2]-x[0])*(x[1]-x[0])*(x[2]-x[1]));
ddy_array[index_y*x_size+index_x] = -0.5;
u[index_x] =
(3./(x[1] - x[0]))*
((y_array[index_y*x_size+1]-y_array[index_y*x_size+0])/
(x[1] - x[0])-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
for (index_x=1; index_x < x_size-1; index_x++) {
sig = (x[index_x] - x[index_x-1])/(x[index_x+1] - x[index_x-1]);
p = sig * ddy_array[index_y*x_size+(index_x-1)] + 2.0;
ddy_array[index_y*x_size+index_x] = (sig-1.0)/p;
u[index_x] =
(y_array[index_y*x_size+(index_x+1)] - y_array[index_y*x_size+index_x])
/ (x[index_x+1] - x[index_x])
- (y_array[index_y*x_size+index_x] - y_array[index_y*x_size+(index_x-1)])
/ (x[index_x] - x[index_x-1]);
u[index_x] = (6.0 * u[index_x] /
(x[index_x+1] - x[index_x-1])
- sig * u[index_x-1]) / p;
}
/************************************************/
if (spline_mode == _SPLINE_NATURAL_) {
qn=un=0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((x[x_size-3]-x[x_size-1])*(x[x_size-3]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-2)]-y_array[index_y*x_size+(x_size-1)])-
(x[x_size-2]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*
(y_array[index_y*x_size+(x_size-3)]-y_array[index_y*x_size+(x_size-1)]))/
((x[x_size-3]-x[x_size-1])*(x[x_size-2]-x[x_size-1])*(x[x_size-3]-x[x_size-2]));
qn=0.5;
un=
(3./(x[x_size-1] - x[x_size-2]))*
(dy_last-(y_array[index_y*x_size+(x_size-1)] - y_array[index_y*x_size+(x_size-2)])/
(x[x_size-1] - x[x_size-2]));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
index_x=x_size-1;
ddy_array[index_y*x_size+index_x] =
(un - qn * u[index_x-1]) /
(qn * ddy_array[index_y*x_size+(index_x-1)] + 1.0);
for (index_x=x_size-2; index_x >= 0; index_x--) {
ddy_array[index_y*x_size+index_x] = ddy_array[index_y*x_size+index_x] *
ddy_array[index_y*x_size+(index_x+1)] + u[index_x];
}
free(u);
return _SUCCESS_;
}
int array_logspline_table_one_column(
double * x, /* vector of size x_size */
int x_size,
int x_stop,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddlogy_array, /* array of size x_size*y_size */
short spline_mode,
ErrorMsg errmsg
) {
double p;
double qn;
double un;
double * u;
double sig;
int index_x;
double dy_first;
double dy_last;
u = malloc((x_stop-1) * sizeof(double));
if (u == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate u",__func__,__LINE__);
return _FAILURE_;
}
if (x_size==2) spline_mode = _SPLINE_NATURAL_; // in the case of only 2 x-values, only the natural spline method is appropriate, for _SPLINE_EST_DERIV_ at least 3 x-values are needed.
/************************************************/
index_x=0;
if (spline_mode == _SPLINE_NATURAL_) {
ddlogy_array[index_y*x_size+index_x] = 0.0;
u[index_x] = 0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_first =
((log(x[2])-log(x[0]))*(log(x[2])-log(x[0]))*
(log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))-
(log(x[1])-log(x[0]))*(log(x[1])-log(x[0]))*
(log(y_array[index_y*x_size+2])-log(y_array[index_y*x_size+0])))/
((log(x[2])-log(x[0]))*(log(x[1])-log(x[0]))*(log(x[2])-log(x[1])));
ddlogy_array[index_y*x_size+index_x] = -0.5;
u[index_x] =
(3./(log(x[1]) - log(x[0])))*
((log(y_array[index_y*x_size+1])-log(y_array[index_y*x_size+0]))/
(log(x[1]) - log(x[0]))-dy_first);
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
for (index_x=1; index_x < x_stop-1; index_x++) {
sig = (log(x[index_x]) - log(x[index_x-1]))/(log(x[index_x+1]) - log(x[index_x-1]));
p = sig * ddlogy_array[index_y*x_size+(index_x-1)] + 2.0;
ddlogy_array[index_y*x_size+index_x] = (sig-1.0)/p;
u[index_x] =
(log(y_array[index_y*x_size+(index_x+1)]) - log(y_array[index_y*x_size+index_x]))
/ (log(x[index_x+1]) - log(x[index_x]))
- (log(y_array[index_y*x_size+index_x]) - log(y_array[index_y*x_size+(index_x-1)]))
/ (log(x[index_x]) - log(x[index_x-1]));
u[index_x] = (6.0 * u[index_x] /
(log(x[index_x+1]) - log(x[index_x-1]))
- sig * u[index_x-1]) / p;
}
/************************************************/
if (spline_mode == _SPLINE_NATURAL_) {
qn=un=0.0;
}
else {
if (spline_mode == _SPLINE_EST_DERIV_) {
dy_last =
((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-3])-log(x[x_stop-1]))*
(log(y_array[index_y*x_size+(x_stop-2)])-log(y_array[index_y*x_size+(x_stop-1)]))-
(log(x[x_stop-2])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
(log(y_array[index_y*x_size+(x_stop-3)])-log(y_array[index_y*x_size+(x_stop-1)])))/
((log(x[x_stop-3])-log(x[x_stop-1]))*(log(x[x_stop-2])-log(x[x_stop-1]))*
(log(x[x_stop-3])-log(x[x_stop-2])));
qn=0.5;
un=
(3./(log(x[x_stop-1]) - log(x[x_stop-2])))*
(dy_last-(log(y_array[index_y*x_size+(x_stop-1)]) - log(y_array[index_y*x_size+(x_stop-2)]))/
(log(x[x_stop-1]) - log(x[x_stop-2])));
}
else {
sprintf(errmsg,"%s(L:%d) Spline mode not identified: %d",__func__,__LINE__,spline_mode);
return _FAILURE_;
}
}
/************************************************/
index_x=x_stop-1;
ddlogy_array[index_y*x_size+index_x] =
(un - qn * u[index_x-1]) /
(qn * ddlogy_array[index_y*x_size+(index_x-1)] + 1.0);
for (index_x=x_stop-2; index_x >= 0; index_x--) {
ddlogy_array[index_y*x_size+index_x] = ddlogy_array[index_y*x_size+index_x] *
ddlogy_array[index_y*x_size+(index_x+1)] + u[index_x];
}
free(u);
return _SUCCESS_;
}
int array_integrate_all_spline(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddy,
double * result,
ErrorMsg errmsg) {
int i;
double h;
*result = 0;
for (i=0; i < n_lines-1; i++) {
h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
*result +=
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.+
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
}
return _SUCCESS_;
}
int array_integrate_all_trapzd_or_spline(
double * array,
int n_columns,
int n_lines,
int index_start_spline,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_ddy,
double * result,
ErrorMsg errmsg) {
int i;
double h;
if ((index_start_spline<0) || (index_start_spline>=n_lines)) {
sprintf(errmsg,"%s(L:%d) index_start_spline outside of range",__func__,__LINE__);
return _FAILURE_;
}
*result = 0;
/* trapezoidal integration till given index */
for (i=0; i < index_start_spline; i++) {
h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
*result +=
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.;
}
/* then, spline integration */
for (i=index_start_spline; i < n_lines-1; i++) {
h = (array[(i+1)*n_columns+index_x]-array[i*n_columns+index_x]);
*result +=
(array[i*n_columns+index_y]+array[(i+1)*n_columns+index_y])*h/2.+
(array[i*n_columns+index_ddy]+array[(i+1)*n_columns+index_ddy])*h*h*h/24.;
}
return _SUCCESS_;
}
/**
* Not called.
*/
int array_integrate(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
int index_int_y_dx,
ErrorMsg errmsg) {
int i;
double sum;
if ((index_int_y_dx == index_x) || (index_int_y_dx == index_y)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d and %d",__func__,__LINE__,index_int_y_dx,index_x,index_y);
return _FAILURE_;
}
sum=0.;
*(array+0*n_columns+index_int_y_dx)=sum;
for (i=1; i<n_lines; i++) {
sum += 0.5 * (*(array+i*n_columns+index_y) + *(array+(i-1)*n_columns+index_y))
* (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
*(array+i*n_columns+index_int_y_dx)=sum;
}
return _SUCCESS_;
}
/**
* Called by thermodynamics_init().
*/
int array_integrate_ratio(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y1,
int index_y2,
int index_int_y1_over_y2_dx,
ErrorMsg errmsg) {
int i;
double sum;
if ((index_int_y1_over_y2_dx == index_x) || (index_int_y1_over_y2_dx == index_y1) || (index_int_y1_over_y2_dx == index_y2)) {
sprintf(errmsg,"%s(L:%d) : Output column %d must differ from input columns %d, %d and %d",__func__,__LINE__,index_int_y1_over_y2_dx,index_x,index_y1,index_y2);
return _FAILURE_;
}
sum=0.;
*(array+0*n_columns+index_int_y1_over_y2_dx)=sum;
for (i=1; i<n_lines; i++) {
sum += 0.5 * (*(array+i*n_columns+index_y1) / *(array+i*n_columns+index_y2)
+ *(array+(i-1)*n_columns+index_y1) / *(array+(i-1)*n_columns+index_y2))
* (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
*(array+i*n_columns+index_int_y1_over_y2_dx)=sum;
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double weight;
inf=0;
sup=n_lines-1;
if (*(array+inf*n_columns+index_x) < *(array+sup*n_columns+index_x)){
if (x < *(array+inf*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array+inf*n_columns+index_x));
return _FAILURE_;
}
if (x > *(array+sup*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array+sup*n_columns+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < *(array+mid*n_columns+index_x)) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < *(array+sup*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array+sup*n_columns+index_x));
return _FAILURE_;
}
if (x > *(array+inf*n_columns+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array+inf*n_columns+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > *(array+mid*n_columns+index_x)) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array+inf*n_columns+i) * (1.-weight)
+ weight * *(array+sup*n_columns+i);
*(result+index_x) = x;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline(
double * __restrict__ x_array,
int n_lines,
double * __restrict__ array,
double * __restrict__ array_splined,
int n_columns,
double x,
int * __restrict__ last_index,
double * __restrict__ result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double h,a,b;
inf=0;
sup=n_lines-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i) +
((a*a*a-a)* *(array_splined+inf*n_columns+i) +
(b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_linear(
double * x_array,
int n_lines,
double * array,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double h,a,b;
inf=0;
sup=n_lines-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i);
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_logspline(
double * x_array,
int n_lines,
double * array,
double * array_logsplined,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double h,a,b;
inf=0;
sup=n_lines-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
*last_index = inf;
h = log(x_array[sup]) - log(x_array[inf]);
b = (log(x)-log(x_array[inf]))/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) = exp(
a * log(array[inf*n_columns+i]) +
b * log(array[sup*n_columns+i]) +
((a*a*a-a)* array_logsplined[inf*n_columns+i] +
(b*b*b-b)* array_logsplined[sup*n_columns+i])*h*h/6.);
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
*
*/
int array_interpolate_spline_one_column(
double * x_array,
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
inf=0;
sup=x_size-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
*y =
a * y_array[index_y * x_size + inf] +
b * y_array[index_y * x_size + sup] +
((a*a*a-a)* ddy_array[index_y * x_size + inf] +
(b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
*
*/
int array_interpolate_extrapolate_spline_one_column(
double * x_array,
int x_size,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
if (x > x_array[x_size-2] || x < x_array[0]) {
/*interpolate/extrapolate linearly y as a function of x*/
h = x_array[x_size-1] - x_array[x_size-2];
b = (x-x_array[x_size-2])/h;
a = 1-b;
*y = a * y_array[index_y * x_size + (x_size-2)] +
b * y_array[index_y * x_size + (x_size-1)];
}
else {
/*interpolate y as a function of x with a spline*/
inf=0;
sup=x_size-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
*y =
a * y_array[index_y * x_size + inf] +
b * y_array[index_y * x_size + sup] +
((a*a*a-a)* ddy_array[index_y * x_size + inf] +
(b*b*b-b)* ddy_array[index_y * x_size + sup])*h*h/6.;
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are in different arrays
*
*
*/
int array_interpolate_extrapolate_logspline_loglinear_one_column(
double * x_array,
int x_size,
int x_stop,
double * y_array, /* array of size x_size*y_size with elements
y_array[index_y*x_size+index_x] */
int y_size,
int index_y,
double * ddlogy_array, /* array of size x_size*y_size */
double x, /* input */
double * y, /* output */
ErrorMsg errmsg
) {
int inf,sup,mid;
double h,a,b;
if (x > x_array[x_stop-1]) {
/*interpolate/extrapolate linearly ln(y) as a function of ln(x)*/
h = log(x_array[x_stop-1]) - log(x_array[x_stop-2]);
b = (log(x)-log(x_array[x_stop-2]))/h;
a = 1-b;
/* *y = exp(a * log(y_array[index_y * x_size + (x_stop-2)]) + */
/* b * log(y_array[index_y * x_size + (x_stop-1)])); */
*y = exp(log(y_array[index_y * x_size + (x_stop-1)])
+(log(x)-log(x_array[x_stop-1]))
*((log(y_array[index_y * x_size + (x_stop-1)])-log(y_array[index_y * x_size + (x_stop-2)]))/h
+h/6.*(ddlogy_array[index_y * x_size + (x_stop-2)]+2.*ddlogy_array[index_y * x_size + (x_stop-1)])));
}
else {
/*interpolate ln(y) as a function of ln(x) with a spline*/
inf=0;
sup=x_stop-1;
if (x_array[inf] < x_array[sup]){
if (x < x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
if (x > x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < x_array[sup]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,x_array[sup]);
return _FAILURE_;
}
if (x > x_array[inf]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,x_array[inf]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
h = log(x_array[sup]) - log(x_array[inf]);
b = (log(x)-log(x_array[inf]))/h;
a = 1-b;
*y = exp(a * log(y_array[index_y * x_size + inf]) +
b * log(y_array[index_y * x_size + sup]) +
((a*a*a-a)* ddlogy_array[index_y * x_size + inf] +
(b*b*b-b)* ddlogy_array[index_y * x_size + sup])*h*h/6.);
}
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_growing_closeby(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,i;
double weight;
inf = *last_index;
sup = *last_index+1;
while (x < *(array+inf*n_columns+index_x)) {
inf--;
if (inf < 0) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,array[index_x]);
return _FAILURE_;
}
}
sup = inf+1;
while (x > *(array+sup*n_columns+index_x)) {
sup++;
if (sup > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,array[(n_lines-1)*n_columns+index_x]);
return _FAILURE_;
}
}
inf = sup-1;
*last_index = inf;
weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array+inf*n_columns+i) * (1.-weight)
+ weight * *(array+sup*n_columns+i);
*(result+index_x) = x;
return _SUCCESS_;
}
/**
* interpolate to get y(x), when x and y are two columns of the same array, x is arranged in growing order, and the point x is presumably close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_one_growing_closeby(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
double x,
int * last_index,
int index_y,
double * result,
ErrorMsg errmsg) {
int inf,sup;
double weight;
inf = *last_index;
sup = *last_index+1;
while (x < *(array+inf*n_columns+index_x)) {
inf--;
if (inf < 0) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,array[index_x]);
return _FAILURE_;
}
}
sup = inf+1;
while (x > *(array+sup*n_columns+index_x)) {
sup++;
if (sup > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,array[(n_lines-1)*n_columns+index_x]);
return _FAILURE_;
}
}
inf = sup-1;
*last_index = inf;
weight=(x-*(array+inf*n_columns+index_x))/(*(array+sup*n_columns+index_x)-*(array+inf*n_columns+index_x));
*result = *(array+inf*n_columns+index_y) * (1.-weight) + *(array+sup*n_columns+index_y) * weight;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably very close to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline_growing_closeby(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,i;
double h,a,b;
/*
if (*last_index < 0) {
sprintf(errmsg,"%s(L:%d) problem with last_index =%d < 0",__func__,__LINE__,*last_index);
return _FAILURE_;
}
if (*last_index > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) problem with last_index =%d > %d",__func__,__LINE__,*last_index,n_lines-1);
return _FAILURE_;
}
*/
inf = *last_index;
class_test(inf<0 || inf>(n_lines-1),
errmsg,
"*lastindex=%d out of range [0:%d]\n",inf,n_lines-1);
while (x < x_array[inf]) {
inf--;
if (inf < 0) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,x_array[0]);
return _FAILURE_;
}
}
sup = inf+1;
while (x > x_array[sup]) {
sup++;
if (sup > (n_lines-1)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,x_array[n_lines-1]);
return _FAILURE_;
}
}
inf = sup-1;
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i) +
((a*a*a-a)* *(array_splined+inf*n_columns+i) +
(b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;
return _SUCCESS_;
}
/**
* interpolate to get y_i(x), when x and y_i are all columns of the same array, x is arranged in growing order, and the point x is presumably close (but maybe not so close) to the previous point x from the last call of this function.
*
* Called by background_at_eta(); background_eta_of_z(); background_solve(); thermodynamics_at_z().
*/
int array_interpolate_spline_growing_hunt(
double * x_array,
int n_lines,
double * array,
double * array_splined,
int n_columns,
double x,
int * last_index,
double * result,
int result_size, /** from 1 to n_columns */
ErrorMsg errmsg) {
int inf,sup,mid,i,inc;
double h,a,b;
inc=1;
if (x >= x_array[*last_index]) {
if (x > x_array[n_lines-1]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,
x,x_array[n_lines-1]);
return _FAILURE_;
}
/* try closest neighboor upward */
inf = *last_index;
sup = inf + inc;
if (x > x_array[sup]) {
/* hunt upward */
while (x > x_array[sup]) {
inf = sup;
inc += 1;
sup += inc;
if (sup > n_lines-1) {
sup = n_lines-1;
}
}
/* bisect */
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
}
else {
if (x < x_array[0]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,
x,x_array[0]);
return _FAILURE_;
}
/* try closest neighboor downward */
sup = *last_index;
inf = sup - inc;
if (x < x_array[inf]) {
/* hunt downward */
while (x < x_array[inf]) {
sup = inf;
inc += 1;
inf -= inc;
if (inf < 0) {
inf = 0;
}
}
/* bisect */
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < x_array[mid]) {sup=mid;}
else {inf=mid;}
}
}
}
*last_index = inf;
h = x_array[sup] - x_array[inf];
b = (x-x_array[inf])/h;
a = 1-b;
for (i=0; i<result_size; i++)
*(result+i) =
a * *(array+inf*n_columns+i) +
b * *(array+sup*n_columns+i) +
((a*a*a-a)* *(array_splined+inf*n_columns+i) +
(b*b*b-b)* *(array_splined+sup*n_columns+i))*h*h/6.;
return _SUCCESS_;
}
/**
* interpolate linearily to get y_i(x), when x and y_i are in two different arrays
*
* Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
*/
int array_interpolate_two(
double * array_x,
int n_columns_x,
int index_x, /** from 0 to (n_columns_x-1) */
double * array_y,
int n_columns_y,
int n_lines, /** must be the same for array_x and array_y */
double x,
double * result,
int result_size, /** from 1 to n_columns_y */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double weight;
inf=0;
sup=n_lines-1;
if (array_x[inf*n_columns_x+index_x] < array_x[sup*n_columns_x+index_x]){
if (x < array_x[inf*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[inf*n_columns_x+index_x]);
return _FAILURE_;
}
if (x > array_x[sup*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[sup*n_columns_x+index_x]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < array_x[mid*n_columns_x+index_x]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < *(array_x+sup*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array_x+sup*n_columns_x+index_x));
return _FAILURE_;
}
if (x > *(array_x+inf*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array_x+inf*n_columns_x+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > *(array_x+mid*n_columns_x+index_x)) {sup=mid;}
else {inf=mid;}
}
}
weight=(x-*(array_x+inf*n_columns_x+index_x))/(*(array_x+sup*n_columns_x+index_x)-*(array_x+inf*n_columns_x+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array_y+i*n_lines+inf) * (1.-weight)
+ weight * *(array_y+i*n_lines+sup) ;
return _SUCCESS_;
}
/**
* Same as array_interpolate_two, but with order of indices exchanged in array_y
*/
int array_interpolate_two_bis(
double * array_x,
int n_columns_x,
int index_x, /** from 0 to (n_columns_x-1) */
double * array_y,
int n_columns_y,
int n_lines, /** must be the same for array_x and array_y */
double x,
double * result,
int result_size, /** from 1 to n_columns_y */
ErrorMsg errmsg) {
int inf,sup,mid,i;
double weight;
inf=0;
sup=n_lines-1;
if (array_x[inf*n_columns_x+index_x] < array_x[sup*n_columns_x+index_x]){
if (x < array_x[inf*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,array_x[inf*n_columns_x+index_x]);
return _FAILURE_;
}
if (x > array_x[sup*n_columns_x+index_x]) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,array_x[sup*n_columns_x+index_x]);
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < array_x[mid*n_columns_x+index_x]) {sup=mid;}
else {inf=mid;}
}
}
else {
if (x < *(array_x+sup*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e < x_min=%e",__func__,__LINE__,x,*(array_x+sup*n_columns_x+index_x));
return _FAILURE_;
}
if (x > *(array_x+inf*n_columns_x+index_x)) {
sprintf(errmsg,"%s(L:%d) : x=%e > x_max=%e",__func__,__LINE__,x,*(array_x+inf*n_columns_x+index_x));
return _FAILURE_;
}
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > *(array_x+mid*n_columns_x+index_x)) {sup=mid;}
else {inf=mid;}
}
}
weight=(x-*(array_x+inf*n_columns_x+index_x))/(*(array_x+sup*n_columns_x+index_x)-*(array_x+inf*n_columns_x+index_x));
for (i=0; i<result_size; i++)
*(result+i) = *(array_y+inf*n_columns_y+i) * (1.-weight)
+ weight * *(array_y+sup*n_columns_y+i) ;
return _SUCCESS_;
}
/**
* interpolate linearily to get y_i(x), when x and y_i are in two different arrays
*
* Called by transfer_interpolate_sources(); transfer_functions_at_k(); perturb_sources_at_eta().
*/
int array_interpolate_two_arrays_one_column(
double * array_x, /* assumed to be a vector (i.e. one column array) */
double * array_y,
int n_columns_y,
int index_y, /* between 0 and (n_columns_y-1) */
int n_lines, /** must be the same for array_x and array_y */
double x,
double * result,
ErrorMsg errmsg) {
int inf,sup,mid;
double weight;
inf=0;
sup=n_lines-1;
if (array_x[inf] < array_x[sup]){
class_test(x < array_x[inf],
errmsg,
"x=%e < x_min=%e",x,array_x[inf]);
class_test(x > array_x[sup],
errmsg,
"x=%e > x_max=%e",x,array_x[sup]);
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x < array_x[mid]) {sup=mid;}
else {inf=mid;}
}
}
else {
class_test(x < array_x[sup],
errmsg,
"x=%e < x_min=%e",x,array_x[sup]);
class_test(x > array_x[inf],
errmsg,
"x=%e > x_max=%e",x,array_x[inf]);
while (sup-inf > 1) {
mid=(int)(0.5*(inf+sup));
if (x > array_x[mid]) {sup=mid;}
else {inf=mid;}
}
}
weight=(x-array_x[inf])/(array_x[sup]-array_x[inf]);
*result = array_y[index_y*n_lines+inf] * (1.-weight)
+ weight * array_y[index_y*n_lines+sup];
return _SUCCESS_;
}
/**
* Called by transfer_solve().
*/
int array_interpolate_equal(
double * array,
int n_columns,
int n_lines,
double x,
double x_min,
double x_max,
double * result,
ErrorMsg errmsg) {
int index_minus,i;
double x_step,x_minus,weight;
if (x < x_min) {
sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_min=%e",__func__,__LINE__,x,x_min);
return _FAILURE_;
}
if (x > x_max) {
sprintf(errmsg,"%s(L:%d) : x out of bounds: x=%e,x_max=%e",__func__,__LINE__,x,x_max);
return _FAILURE_;
}
x_step = (x_max-x_min)/(n_lines-1);
index_minus = (int)((x-x_min)/x_step);
x_minus = index_minus * x_step;
weight = (x-x_minus) / x_step;
for (i=0; i<n_columns; i++)
result[i] = *(array+n_columns*index_minus+i)*(1.-weight)
+ *(array+n_columns*(index_minus+1)+i)*weight;
return _SUCCESS_;
}
/**
* cubic interpolation of array with equally space abscisses
*/
int array_interpolate_cubic_equal(
double x0,
double dx,
double *yarray,
int Nx,
double x,
double * result,
ErrorMsg errmsg) {
int i;
double frac;
class_test((dx > 0 && (x<x0 || x>x0+dx*(Nx-1))),
errmsg,
"x=%e out of range [%e %e]",x,x0,x0+dx*(Nx-1));
class_test((dx < 0 && (x>x0 || x<x0+dx*(Nx-1))),
errmsg,
"x=%e out of range [%e %e]",x,x0+dx*(Nx-1),x0);
i = (int)floor((x-x0)/dx);
if (i<1) i=1;
if (i>Nx-3) i=Nx-3;
frac = (x-x0)/dx-i;
yarray += i-1;
*result=-yarray[0]*frac*(1.-frac)*(2.-frac)/6.
+yarray[1]*(1.+frac)*(1.-frac)*(2.-frac)/2.
+yarray[2]*(1.+frac)*frac*(2.-frac)/2.
+yarray[3]*(1.+frac)*frac*(frac-1.)/6.;
return _SUCCESS_;
}
int array_interpolate_parabola(double x1,
double x2,
double x3,
double x,
double y1,
double y2,
double y3,
double * y,
double * dy,
double * ddy,
ErrorMsg errmsg) {
double a,b,c;
/*
a x_i**2 + b x_i + c = y_i
a (x1**2-x2**2) + b (x1-x2) = y1-y2
a (x3**2-x2**2) + b (x3-x2) = y3-y2
a (x1**2-x2**2)(x3**2-x2**2) + b (x1-x2)(x3**2-x2**2) = (y1-y2)(x3**2-x2**2)
a (x3**2-x2**2)(x1**2-x2**2) + b (x3-x2)(x1**2-x2**2) = (y3-y2)(x1**2-x2**2)
b = [(y1-y2)(x3**2-x2**2) - (y3-y2)(x1**2-x2**2)]/(x1-x2)(x3-x2)(x3-x1)
*/
b = ((y1-y2)*(x3-x2)*(x3+x2) - (y3-y2)*(x1-x2)*(x1+x2))/(x1-x2)/(x3-x2)/(x3-x1);
a = (y1-y2-b*(x1-x2))/(x1-x2)/(x1+x2);
c = y2 - b*x2 - a*x2*x2;
*y = a*x*x + b*x + c;
*dy = 2.*a*x + b;
*ddy = 2.*a;
return _SUCCESS_;
}
/**
* Called by transfer_solve().
*/
int array_integrate_all(
double * array,
int n_columns,
int n_lines,
int index_x, /** from 0 to (n_columns-1) */
int index_y,
double *result) {
int i;
double sum;
sum=0.;
for (i=1; i<n_lines; i++) {
sum += 0.5 * (*(array+i*n_columns+index_y) + *(array+(i-1)*n_columns+index_y))
* (*(array+i*n_columns+index_x) - *(array+(i-1)*n_columns+index_x));
}
*result = sum;
return _SUCCESS_;
}
int array_smooth_trg(double * array,
int k_size,
int starting_k,
int eta_size,
int index_eta,
int radius, /*3, 5 or 7 */
ErrorMsg errmsg) {
double * smooth;
int i,j,jmin,jmax;
double weigth;
double *coeff;
smooth=malloc(k_size*sizeof(double));
if (smooth == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
return _FAILURE_;
}
class_calloc(coeff,2*radius+1,sizeof(double),errmsg);
switch(radius){
case 3:
weigth = 21;
coeff[0] = -2;
coeff[1] = 3;
coeff[2] = 6;
coeff[3] = 7;
coeff[4] = 6;
coeff[5] = 3;
coeff[6] = -2;
break;
case 4:
weigth = 231;
coeff[0] = -21;
coeff[1] = 14;
coeff[2] = 39;
coeff[3] = 54;
coeff[4] = 59;
coeff[5] = 54;
coeff[6] = 39;
coeff[7] = 14;
coeff[8] = -21;
break;
case 5:
weigth = 429;
coeff[0] = -36;
coeff[1] = 9;
coeff[2] = 44;
coeff[3] = 69;
coeff[4] = 84;
coeff[5] = 89;
coeff[6] = 84;
coeff[7] = 69;
coeff[8] = 44;
coeff[9] = 9;
coeff[10] = -36;
break;
case 6:
weigth = 143;
coeff[0] = -11;
coeff[1] = 0;
coeff[2] = 9;
coeff[3] = 16;
coeff[4] = 21;
coeff[5] = 24;
coeff[6] = 25;
coeff[7] = 24;
coeff[8] = 21;
coeff[9] = 16;
coeff[10] = 9;
coeff[11] = 0;
coeff[12] = -11;
break;
case 7:
weigth = 1105;
coeff[0] = -78;
coeff[1] = -13;
coeff[2] = 42;
coeff[3] = 87;
coeff[4] = 122;
coeff[5] = 147;
coeff[6] = 162;
coeff[7] = 167;
coeff[8] = 162;
coeff[9] = 147;
coeff[10] = 122;
coeff[11] = 87;
coeff[12] = 42;
coeff[13] = -13;
coeff[14] = -78;
break;
/* case 8: */
default:
class_stop(errmsg,"Non valid radius %d: please chose between 3 4 5 or 6\n",radius);
weigth=0;
break;
}
for (i=starting_k; i<k_size-radius; i++) {
smooth[i]=0.;
jmin = MAX(i-radius,0);
jmax = MIN(i+radius,k_size-1);
for (j=jmin; j <= jmax; j++) {
smooth[i] += coeff[j-jmin]*array[j+k_size*index_eta];
}
smooth[i] /= weigth;
}
for (i=starting_k; i<k_size-radius; i++)
array[i+k_size*index_eta] = smooth[i];
free(smooth);
free(coeff);
return _SUCCESS_;
}
int array_smooth(double * array,
int n_columns,
int n_lines,
int index, /** from 0 to (n_columns-1) */
int radius,
ErrorMsg errmsg) {
double * smooth;
int i,j,jmin,jmax;
double weigth;
smooth=malloc(n_lines*sizeof(double));
if (smooth == NULL) {
sprintf(errmsg,"%s(L:%d) Cannot allocate smooth",__func__,__LINE__);
return _FAILURE_;
}
for (i=0; i<n_lines; i++) {
smooth[i]=0.;
weigth=0.;
jmin = MAX(i-radius,0);
jmax = MIN(i+radius,n_lines-1);
for (j=jmin; j <= jmax; j++) {
smooth[i] += array[j*n_columns+index];
weigth += 1.;
}
smooth[i] /= weigth;
}
for (i=0; i<n_lines; i++)
array[i*n_columns+index] = smooth[i];
free(smooth);
return _SUCCESS_;
}
/**
* Compute quadrature weights for the trapezoidal integration method, xhen x is in gorwing order.
*
* @param x Input: Grid points on which f() is known.
* @param n Input: number of grid points.
* @param w_trapz Output: Weights of the trapezoidal method.
* @return the error status
*/
int array_trapezoidal_weights(
double * __restrict__ x,
int n,
double * __restrict__ w_trapz,
ErrorMsg errmsg
) {
int i;
/* Case with just one point, w would normally be 0. */
if (n==1){
w_trapz[0] = 0.0;
}
else if (n>1){
//Set edgeweights:
w_trapz[0] = 0.5*(x[1]-x[0]);
w_trapz[n-1] = 0.5*(x[n-1]-x[n-2]);
//Set inner weights:
for (i=1; i<(n-1); i++){
w_trapz[i] = 0.5*(x[i+1]-x[i-1]);
}
}
return _SUCCESS_;
}
/**
* Compute quadrature weights for the trapezoidal integration method, when x is in decreasing order.
*
* @param x Input: Grid points on which f() is known.
* @param n Input: number of grid points.
* @param w_trapz Output: Weights of the trapezoidal method.
* @return the error status
*/
int array_trapezoidal_mweights(
double * __restrict__ x,
int n,
double * __restrict__ w_trapz,
ErrorMsg errmsg
) {
int i;
/* Case with just one point. */
if (n==1){
w_trapz[0] = 1.0;
}
else if (n>1){
//Set edgeweights:
w_trapz[0] = 0.5*(x[0]-x[1]);
w_trapz[n-1] = 0.5*(x[n-2]-x[n-1]);
//Set inner weights:
for (i=1; i<(n-1); i++){
w_trapz[i] = 0.5*(x[i-1]-x[i+1]);
}
}
return _SUCCESS_;
}
/**
* Compute integral of function using trapezoidal method.
*
* @param integrand Input: The function we are integrating.
* @param n Input: Compute integral on grid [0;n-1].
* @param w_trapz Input: Weights of the trapezoidal method.
* @param I Output: The integral.
* @return the error status
*/
int array_trapezoidal_integral(
double * __restrict__ integrand,
int n,
double * __restrict__ w_trapz,
double * __restrict__ I,
ErrorMsg errmsg
) {
int i;
double res=0.0;
for (i=0; i<n; i++){
res += integrand[i]*w_trapz[i];
}
*I = res;
return _SUCCESS_;
}
/**
* Compute convolution integral of product of two functions using trapezoidal method.
*
* @param integrand1 Input: Function 1.
* @param integrand2 Input: Function 2.
* @param n Input: Compute integral on grid [0;n-1].
* @param w_trapz Input: Weights of the trapezoidal method.
* @param I Output: The integral.
* @return the error status
*/
int array_trapezoidal_convolution(
double * __restrict__ integrand1,
double * __restrict__ integrand2,
int n,
double * __restrict__ w_trapz,
double * __restrict__ I,
ErrorMsg errmsg
) {
int i;
double res=0.0;
for (i=0; i<n; i++){
res += integrand1[i]*integrand2[i]*w_trapz[i];
}
*I = res;
return _SUCCESS_;
}
/**
* Compute weights and absiccas for Gaussian quadrature rules, including
* Gauss-Chebyshev integration (both versions)
* Gauss-Legendre integration
* Gauss-Legendre half-integration
* and normal trapezoidal integration
*
* See the arrays.h file for their descriptions, or open the manual.
*
* @param xarray Input: Allocated array of size N, in which absiccas are stored
* @param warray Input: Allocated array of size N, in which weights are stored
* @param N Input: Array size
* @param gauss_type Input: Method to use for integration
* @return the error status
*/
int array_weights_gauss(double* xarray, double* warray, int N,short gauss_type,ErrorMsg err_msg){
int i;
class_test(N<1,
err_msg,
"invalid array size for integration");
if(N==1){*xarray=0.0; *warray = 2.;return _SUCCESS_;}
if(gauss_type==gauss_type_chebyshev_1){
for(i=0;i<N;++i){
xarray[i]=cos((2.0*i+1.0)/(2.0*(double)N)*_PI_);
warray[i]=_PI_/(double)N;
}
return _SUCCESS_;
}
else if(gauss_type==gauss_type_chebyshev_2){
for(i=0;i<N;++i){
xarray[i]=cos((i+1.0)/((double)N+1.0)*_PI_);
double sinval = sin((i+1.0)/((double)N+1.0)*_PI_);
warray[i]=_PI_/((double)N+1.0)*sinval*sinval;
}
return _SUCCESS_;
}
else if(gauss_type==gauss_type_legendre){
int Nhalf,j;
double zero,zeroprev,pol,dpol,polprev,polnext;
Nhalf = 0.5*((double)(N+1));
for(i=0;i<Nhalf;++i){
zero = cos(_PI_*(2.0*i+1.5)/(2.0*(double)N+1.0));
do{
polnext = 1.0;
pol = 0.0;
for(j=1;j<=N;++j){
polprev = pol;
pol = polnext;
polnext = ((2.0*j-1.0)*zero*pol-(j-1.0)*polprev)/j;
}
dpol = N*(zero*polnext-pol)/(zero*zero-1.0);
zeroprev = zero;
zero = zeroprev-polnext/dpol;
}while(fabs(zero-zeroprev)>GAUSS_EPSILON);
xarray[i]=-zero;
xarray[N-1-i]=zero;
warray[i]=2.0/((1.0-zero*zero)*dpol*dpol);
warray[N-1-i]=warray[i];
}
return _SUCCESS_;
}
else if(gauss_type==gauss_type_legendre_half){
int Nhalf,j;
double zero,zeroprev,pol,dpol,polprev,polnext;
Nhalf = 0.5*((double)((2*N-1)+1));
for(i=0;i<Nhalf;++i){
zero = cos(_PI_*(2.0*i+1.5)/(2.0*(double)(2*N-1)+1.0));
do{
polnext = 1.0;
pol = 0.0;
for(j=1;j<=(2*N-1);++j){
polprev = pol;
pol = polnext;
polnext = ((2.0*j-1.0)*zero*pol-(j-1.0)*polprev)/j;
}
dpol = (2*N-1)*(zero*polnext-pol)/(zero*zero-1.0);
zeroprev = zero;
zero = zeroprev-polnext/dpol;
}while(fabs(zero-zeroprev)>GAUSS_EPSILON);
xarray[N-1-i]=2*zero-1;
warray[N-1-i]=4.0/((1.0-zero*zero)*dpol*dpol);
}
return _SUCCESS_;
}
/* This gauss-hermite method currently only works until N=150 due
* to numerical underflow of the weights giving NaN's otherwise.
* It will stay commented out for now.
* TODO :: Find a way of extending beyond N=150
* */
/*else if(gauss_type==gauss_type_hermite){
//Careful, has to go from -inf to inf, not -1 to 1
int j,newstep,Nhalf;
double polprev,pol,polnext,dpol,zero,zeroprev;
Nhalf = 0.5*((double)N+1.0);
class_test(N>150,err_msg,"Gauss-Hermite quadrature is not numerically viable with this huge N");
for(i=1;i<=Nhalf;++i){
if(i==1){
zero = sqrt(2.0*N+1.0)-1.85575*pow(2.0*N+1.0,-1.0/6.0);
}
else if(i==2){
zero -= 1.14*pow((double)N,0.426)/zero;
}
else if(i==3){
zero = 1.86*zero-0.86*xarray[0];
}
else if(i==4){
zero = 1.91*zero-0.91*xarray[1];
}
else{
zero = 2.0*zero-xarray[i-3];
}
for(newstep=1;newstep<=GAUSS_HERMITE_MAXITER;++newstep){
polnext = 0.25*_PI_;
pol = 0.0;
for(j=1;j<=N;++j){
polprev = pol;
pol = polprev;
polnext = zero*sqrt(2.0/j)*pol-sqrt(((double)(j-1.0))/j)*polprev;
}
dpol = sqrt(2.0*N)*pol;
zeroprev = zero;
zero = zeroprev - polnext/dpol;
if(fabs(zero-zeroprev)<=GAUSS_EPSILON){break;}
}
class_test((newstep>=GAUSS_HERMITE_MAXITER),
err_msg,
"no convergence of Newton's method during finding integration points for Gauss Hermite Quadrature."
);
xarray[i-1]=zero;
xarray[N-i]=-zero;
warray[i-1]=2.0/(dpol*dpol);
warray[N-i]=warray[i-1];
}
return _SUCCESS_;
}*/
else if(gauss_type==gauss_type_trapezoid){
for(i=0;i<N;++i){
xarray[i] = 2.0*((double)i/(double)(N-1))-1.0;
warray[i] = 2.0/(double)(N-1);
}
warray[0]*=0.5;
warray[N-1]*=0.5;
return _SUCCESS_;
}
else{
class_stop(err_msg,
"gauss integration type not recognized");
}
}
/**
* Compute weights and absiccas for Gaussian quadrature rules, including
* Gauss-Chebyshev integration (both versions)
* Gauss-Legendre integration
* and normal trapezoidal integration
*
* Additionally rescales the weights and abscissas to conform to a
* given integration region [xmin,xmax]
*
* @param xarray Input: Allocated array of size N, in which absiccas are stored
* @param warray Input: Allocated array of size N, in which weights are stored
* @param xmin Input: Minimum integration boundary
* @param xmax Input: Maximum integration boundary
* @param N Input: Array size
* @param gauss_type Input: Method to use for integration
* @return the error status
*/
int array_weights_gauss_limits(double* xarray, double* warray,double xmin,double xmax, int N,short gauss_type,ErrorMsg err_msg){
int i;
/* Obtain absiccas, weights in the -1 to 1 region */
array_weights_gauss(xarray,warray,N,gauss_type,err_msg);
/* Rescale absiccas, weights to the xmin,xmax region */
double xmean = 0.5*(xmax+xmin);
double xdelta = 0.5*(xmax-xmin);
for(i=1;i<N-1;++i){
xarray[i]=xdelta*xarray[i]+xmean;
warray[i]*=xdelta;
}
/* The first and last elements need to be precise */
xarray[0] = xmin;
warray[0]*=xdelta;
xarray[N-1] = xmax;
warray[N-1]*=xdelta;
return _SUCCESS_;
}
/**
* Rescales the weights and abscissas of a given
* Gauss-integration to conform to another
* integration region [xmin,xmax]
*
* This method allows the user to only compute the Gauss-weights once,
* and subsequently rescale for any required integration with the same
* number of total points N
*
* @param xarray Input: Previous absiccas
* @param warray Input: Previous weights
* @param xarray Input: Allocated array of size N, in which final absiccas are stored
* @param warray Input: Allocated array of size N, in which final weights are stored
* @param xmin Input: Minimum integration boundary
* @param xmax Input: Maximum integration boundary
* @param N Input: Array size
* @return the error status
*/
int array_weights_gauss_rescale_limits(double* xarray,double* warray,double* xarrayres,double* warrayres,double xmin,double xmax,int N,ErrorMsg err_msg){
int i;
double xmean = 0.5*(xmax+xmin);
double xdelta = 0.5*(xmax-xmin);
for(i=0;i<N;++i){
xarrayres[i]=xdelta*xarray[i]+xmean;
warrayres[i]=warray[i]*xdelta;
}
return _SUCCESS_;
}
|
template_cpu_02.h | /* Copyright 2015 The math21 Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#pragma once
#include "inner.h"
#define MATH21_IS_FROM_CPU
#include "../kernels/generic_02.kl"
#undef MATH21_IS_FROM_CPU
namespace math21 {
template<typename T>
void math21_template_tensor_f_shrink_cpu(NumN fname, NumN n, const T *x, T *y,
NumN dims_x, const NumN *dx, NumN dims_y, const NumN *dy,
NumN nb, const NumN *b,
NumN nv, NumN dims_v, const NumN *dv) {
x -= 1;
y -= 1;
dx -= 1;
dy -= 1;
b -= 1;
dv -= 1;
NumN id;
math21_type_f_min_like f_min_like = NULL;
math21_type_f_argmin_like f_argmin_like = NULL;
if (fname == m21_fname_sum) {
f_min_like = math21_device_f_sum;
} else if (fname == m21_fname_norm1) {
f_min_like = math21_device_f_norm1;
} else if (fname == m21_fname_norm2_square) {
f_min_like = math21_device_f_norm2_square;
} else if (fname == m21_fname_mean) {
f_min_like = math21_device_f_mean;
} else if (fname == m21_fname_max) {
f_min_like = math21_device_f_max;
} else if (fname == m21_fname_min) {
f_min_like = math21_device_f_min;
} else if (fname == m21_fname_argmax) {
f_argmin_like = math21_device_f_argmax;
} else if (fname == m21_fname_argmin) {
f_argmin_like = math21_device_f_argmin;
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
if (f_min_like) {
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_tensor_f_shrink_cpu_kernel(f_min_like, n, x, y,
dims_x, dx, dims_y, dy, nb, b, nv, dims_v, dv, id);
}
} else {
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_tensor_f_shrink_cpu_kernel(f_argmin_like, n, x, y,
dims_x, dx, dims_y, dy, nb, b, nv, dims_v, dv,
id);
}
}
}
template<typename T>
void math21_template_tensor_f_inner_product_like_shrink_cpu(NumN fname, NumN n,
const T *x1, const T *x2, T *y,
NumN dims_x, const NumN *dx, NumN dims_y,
const NumN *dy,
NumN nb, const NumN *b,
NumN nv, NumN dims_v, const NumN *dv) {
x1 -= 1;
x2 -= 1;
y -= 1;
dx -= 1;
dy -= 1;
b -= 1;
dv -= 1;
NumN id;
math21_type_f_inner_product_like f = NULL;
if (fname == m21_fname_inner_product) {
f = math21_device_f_inner_product;
} else if (fname == m21_fname_distance_1) {
f = math21_device_f_distance_1;
} else if (fname == m21_fname_distance_2_square) {
f = math21_device_f_distance_2_square;
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_tensor_f_inner_product_like_shrink_cpu_kernel(f, n, x1, x2, y,
dims_x, dx, dims_y, dy, nb, b, nv, dims_v, dv,
id);
}
}
template<typename T>
void math21_template_tensor_f_inner_product_like_bcshrink_cpu(NumN fname, NumN n,
const T *x1, const T *x2, T *y,
NumN dims_x1, const NumN *dx1, NumN dims_x2,
const NumN *dx2,
NumN dims_x, const NumN *dx, NumN dims_y,
const NumN *dy,
NumN nb, const NumN *b,
NumN nv, NumN dims_v, const NumN *dv) {
x1 -= 1;
x2 -= 1;
y -= 1;
dx1 -= 1;
dx2 -= 1;
dx -= 1;
dy -= 1;
b -= 1;
dv -= 1;
NumN id;
math21_type_f_inner_product_like f = NULL;
if (fname == m21_fname_inner_product) {
f = math21_device_f_inner_product;
} else if (fname == m21_fname_distance_1) {
f = math21_device_f_distance_1;
} else if (fname == m21_fname_distance_2_square) {
f = math21_device_f_distance_2_square;
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_tensor_f_inner_product_like_bcshrink_cpu_kernel(f, n, x1, x2, y,
dims_x1, dx1, dims_x2, dx2,
dims_x, dx, dims_y, dy, nb, b, nv, dims_v,
dv, id);
}
}
// todo: use index 1 for x, y
// a special kind of sub
// x is sub-tensor of y
template<typename T>
void math21_template_tensor_f_with_broadcast_in_dn_cpu(NumN fname, NumN n,
const T *x1,
const T *x2,
T *y,
NumN dims_x1, const NumN *dx1,
NumN dims_x2, const NumN *dx2,
NumN dims_y, const NumN *dy) {
x1 -= 1;
x2 -= 1;
y -= 1;
dx1 -= 1;
dx2 -= 1;
dy -= 1;
NumN id;
math21_type_f_add_like f_add_like = NULL;
if (fname == m21_fname_add) {
f_add_like = math21_device_f_add;
} else if (fname == m21_fname_subtract) {
f_add_like = math21_device_f_subtract;
} else if (fname == m21_fname_multiply) {
f_add_like = math21_device_f_multiply;
} else if (fname == m21_fname_divide) {
f_add_like = math21_device_f_divide;
} else if (fname == m21_fname_ele_is_equal) {
f_add_like = math21_device_f_is_equal;
} else if (fname == m21_fname_ele_is_less_than) {
f_add_like = math21_device_f_is_less_than;
} else if (fname == m21_fname_ele_is_not_less_than) {
f_add_like = math21_device_f_is_not_less_than;
} else if (fname == m21_fname_set_using_mask) {
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
if (fname == m21_fname_set_using_mask) {
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_tensor_set_using_mask_in_dn_cpu_kernel(n,
x1, x2, y,
dims_x1, dx1,
dims_x2, dx2,
dims_y, dy, id);
}
} else {
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_tensor_f_with_broadcast_in_dn_cpu_kernel(f_add_like, n,
x1, x2, y,
dims_x1, dx1,
dims_x2, dx2,
dims_y, dy, id);
}
}
}
// todo: use index 1 for x, y
template<typename T>
void math21_template_vector_f_add_like_cpu(NumN fname, NumN n,
const T *x1,
const T *x2,
T *y) {
x1 -= 1;
x2 -= 1;
y -= 1;
NumN id;
math21_type_f_add_like f_add_like = NULL;
if (fname == m21_fname_add) {
f_add_like = math21_device_f_add;
} else if (fname == m21_fname_subtract) {
f_add_like = math21_device_f_subtract;
} else if (fname == m21_fname_multiply) {
f_add_like = math21_device_f_multiply;
} else if (fname == m21_fname_divide) {
f_add_like = math21_device_f_divide;
} else if (fname == m21_fname_ele_is_equal) {
f_add_like = math21_device_f_is_equal;
} else if (fname == m21_fname_ele_is_less_than) {
f_add_like = math21_device_f_is_less_than;
} else if (fname == m21_fname_ele_is_not_less_than) {
f_add_like = math21_device_f_is_not_less_than;
}else if (fname == m21_fname_set_using_mask) {
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
if (fname == m21_fname_set_using_mask) {
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_vector_set_using_mask_cpu_kernel(n, x1, x2, y, id);
}
} else {
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_vector_f_add_like_cpu_kernel(f_add_like, n,
x1, x2, y, id);
}
}
}
template<typename T>
void math21_template_vector_f_sin_like_cpu(NumN fname, NumN n,
const T *x, T *y) {
x -= 1;
y -= 1;
NumN id;
math21_type_f_sin_like f = NULL;
if (fname == m21_fname_sin) {
f = math21_device_f_sin;
} else if (fname == m21_fname_cos) {
f = math21_device_f_cos;
} else if (fname == m21_fname_tan) {
f = math21_device_f_tan;
} else if (fname == m21_fname_exp) {
f = math21_device_f_exp;
} else if (fname == m21_fname_log) {
f = math21_device_f_log;
} else if (fname == m21_fname_abs) {
f = math21_device_f_abs;
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_vector_f_sin_like_cpu_kernel(f, n, x, y, id);
}
}
template<typename T>
void math21_template_vector_f_kx_like_cpu(NumN fname, NumN n, T k,
const T *x, T *y) {
x -= 1;
y -= 1;
NumN id;
math21_type_f_kx_like f = NULL;
if (fname == m21_fname_kx_add) {
f = math21_device_f_add;
} else if (fname == m21_fname_kx_subtract) {
f = math21_device_f_subtract;
} else if (fname == m21_fname_xk_subtract) {
f = math21_device_f_xk_subtract;
} else if (fname == m21_fname_kx_mul) {
f = math21_device_f_multiply;
} else if (fname == m21_fname_kx_divide) {
f = math21_device_f_divide;
} else if (fname == m21_fname_xk_divide) {
f = math21_device_f_xk_divide;
} else if (fname == m21_fname_kx_pow) {
f = math21_device_f_kx_pow;
} else if (fname == m21_fname_xk_pow) {
f = math21_device_f_xk_pow;
} else {
MATH21_ASSERT(0, "not support calling function with name " << fname);
}
#pragma omp parallel for
for (id = 1; id <= n; ++id) {
math21_template_vector_f_kx_like_cpu_kernel(f, n, k, x, y, id);
}
}
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
stats.c | //-----------------------------------------------------------------------------
// stats.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (Build 5.1.001)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// 05/10/18 (Build 5.1.013)
// Author: L. Rossman (EPA)
// R. Dickinson (CDM)
//
// Simulation statistics functions.
//
// Build 5.1.007:
// - Exfiltration losses added to storage node statistics.
//
// Build 5.1.008:
// - Support for updating groundwater statistics added.
// - Support for updating maximum reported nodal depths added.
// - OpenMP parallelization applied to updating node and link flow statistics.
// - Updating of time that conduit is upstrm/dnstrm full was modified.
//
// Build 5.1.011:
// - Surcharging is now evaluated only under dynamic wave flow routing and
// storage nodes cannot be classified as surcharged.
//
// Build 5.1.012:
// - Time step statistics now evaluated only in non-steady state periods.
// - Check for full conduit flow now accounts for number of barrels.
//
// Build 5.1.013:
// - Include omp.h protected against lack of compiler support for OpenMP.
// - Statistics on impervious and pervious runoff totals added.
// - Storage nodes with a non-zero surcharge depth (e.g. enclosed tanks)
// can now be classified as being surcharged.
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "headers.h"
#include "swmm5.h"
#if defined(_OPENMP) //(5.1.013)
#include <omp.h>
#endif
//-----------------------------------------------------------------------------
// Shared variables
//-----------------------------------------------------------------------------
#define MAX_STATS 5
static TSysStats SysStats;
static TMaxStats MaxMassBalErrs[MAX_STATS];
static TMaxStats MaxCourantCrit[MAX_STATS];
static TMaxStats MaxFlowTurns[MAX_STATS];
static double SysOutfallFlow;
//-----------------------------------------------------------------------------
// Exportable variables (shared with statsrpt.c)
//-----------------------------------------------------------------------------
TSubcatchStats* SubcatchStats;
TNodeStats* NodeStats;
TLinkStats* LinkStats;
TStorageStats* StorageStats;
TOutfallStats* OutfallStats;
TPumpStats* PumpStats;
double MaxOutfallFlow;
double MaxRunoffFlow;
//-----------------------------------------------------------------------------
// Imported variables
//-----------------------------------------------------------------------------
extern double* NodeInflow; // defined in massbal.c
extern double* NodeOutflow; // defined in massbal.c
//-----------------------------------------------------------------------------
// External functions (declared in funcs.h)
//-----------------------------------------------------------------------------
// stats_open (called from swmm_start in swmm5.c)
// stats_close (called from swmm_end in swmm5.c)
// stats_report (called from swmm_end in swmm5.c)
// stats_updateSubcatchStats (called from subcatch_getRunoff)
// stats_updateGwaterStats (called from gwater_getGroundwater)
// stats_updateFlowStats (called from routing_execute)
// stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c)
// stats_updateMaxNodeDepth (called from output_saveNodeResults)
//-----------------------------------------------------------------------------
// Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);
//=============================================================================
int stats_open()
//
// Input: none
// Output: returns an error code
// Purpose: opens the simulation statistics system.
//
{
int j, k;
// --- set all pointers to NULL
NodeStats = NULL;
LinkStats = NULL;
StorageStats = NULL;
OutfallStats = NULL;
PumpStats = NULL;
// --- allocate memory for & initialize subcatchment statistics
SubcatchStats = NULL;
if ( Nobjects[SUBCATCH] > 0 )
{
SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
sizeof(TSubcatchStats));
if ( !SubcatchStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
for (j=0; j<Nobjects[SUBCATCH]; j++)
{
SubcatchStats[j].precip = 0.0;
SubcatchStats[j].runon = 0.0;
SubcatchStats[j].evap = 0.0;
SubcatchStats[j].infil = 0.0;
SubcatchStats[j].runoff = 0.0;
SubcatchStats[j].maxFlow = 0.0;
SubcatchStats[j].impervRunoff = 0.0; //(5.1.013)
SubcatchStats[j].pervRunoff = 0.0; //
}
for (j=0; j<Nobjects[SUBCATCH]; j++)
{
if ( Subcatch[j].groundwater == NULL ) continue;
Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
Subcatch[j].groundwater->stats.infil = 0.0;
Subcatch[j].groundwater->stats.latFlow = 0.0;
Subcatch[j].groundwater->stats.deepFlow = 0.0;
Subcatch[j].groundwater->stats.evap = 0.0;
Subcatch[j].groundwater->stats.maxFlow = 0.0;
}
}
// --- allocate memory for node & link stats
if ( Nobjects[LINK] > 0 )
{
NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
if ( !NodeStats || !LinkStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
}
// --- initialize node stats
if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
{
NodeStats[j].avgDepth = 0.0;
NodeStats[j].maxDepth = 0.0;
NodeStats[j].maxDepthDate = StartDateTime;
NodeStats[j].maxRptDepth = 0.0;
NodeStats[j].volFlooded = 0.0;
NodeStats[j].timeFlooded = 0.0;
NodeStats[j].timeSurcharged = 0.0;
NodeStats[j].timeCourantCritical = 0.0;
NodeStats[j].totLatFlow = 0.0;
NodeStats[j].maxLatFlow = 0.0;
NodeStats[j].maxInflow = 0.0;
NodeStats[j].maxOverflow = 0.0;
NodeStats[j].maxPondedVol = 0.0;
NodeStats[j].maxInflowDate = StartDateTime;
NodeStats[j].maxOverflowDate = StartDateTime;
}
// --- initialize link stats
if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
{
LinkStats[j].maxFlow = 0.0;
LinkStats[j].maxVeloc = 0.0;
LinkStats[j].maxDepth = 0.0;
LinkStats[j].timeSurcharged = 0.0;
LinkStats[j].timeFullUpstream = 0.0;
LinkStats[j].timeFullDnstream = 0.0;
LinkStats[j].timeFullFlow = 0.0;
LinkStats[j].timeCapacityLimited = 0.0;
LinkStats[j].timeCourantCritical = 0.0;
for (k=0; k<MAX_FLOW_CLASSES; k++)
LinkStats[j].timeInFlowClass[k] = 0.0;
LinkStats[j].flowTurns = 0;
LinkStats[j].flowTurnSign = 0;
}
// --- allocate memory for & initialize storage unit statistics
if ( Nnodes[STORAGE] > 0 )
{
StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
sizeof(TStorageStats));
if ( !StorageStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( k = 0; k < Nobjects[NODE]; k++ )
{
if ( Node[k].type != STORAGE ) continue;
j = Node[k].subIndex;
StorageStats[j].initVol = Node[k].newVolume;
StorageStats[j].avgVol = 0.0;
StorageStats[j].maxVol = 0.0;
StorageStats[j].maxFlow = 0.0;
StorageStats[j].evapLosses = 0.0;
StorageStats[j].exfilLosses = 0.0;
StorageStats[j].maxVolDate = StartDateTime;
}
}
// --- allocate memory for & initialize outfall statistics
if ( Nnodes[OUTFALL] > 0 )
{
OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
sizeof(TOutfallStats));
if ( !OutfallStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
{
OutfallStats[j].avgFlow = 0.0;
OutfallStats[j].maxFlow = 0.0;
OutfallStats[j].totalPeriods = 0;
if ( Nobjects[POLLUT] > 0 )
{
OutfallStats[j].totalLoad =
(double *) calloc(Nobjects[POLLUT], sizeof(double));
if ( !OutfallStats[j].totalLoad )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
for (k=0; k<Nobjects[POLLUT]; k++)
OutfallStats[j].totalLoad[k] = 0.0;
}
else OutfallStats[j].totalLoad = NULL;
}
}
// --- allocate memory & initialize pumping statistics
if ( Nlinks[PUMP] > 0 )
{
PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
if ( !PumpStats )
{
report_writeErrorMsg(ERR_MEMORY, "");
return ErrorCode;
}
else for ( j = 0; j < Nlinks[PUMP]; j++ )
{
PumpStats[j].utilized = 0.0;
PumpStats[j].minFlow = 0.0;
PumpStats[j].avgFlow = 0.0;
PumpStats[j].maxFlow = 0.0;
PumpStats[j].volume = 0.0;
PumpStats[j].energy = 0.0;
PumpStats[j].startUps = 0;
PumpStats[j].offCurveLow = 0.0;
PumpStats[j].offCurveHigh = 0.0;
}
}
// --- initialize system stats
MaxRunoffFlow = 0.0;
MaxOutfallFlow = 0.0;
SysStats.maxTimeStep = 0.0;
SysStats.minTimeStep = RouteStep;
SysStats.avgTimeStep = 0.0;
SysStats.avgStepCount = 0.0;
SysStats.steadyStateCount = 0.0;
return 0;
}
//=============================================================================
void stats_close()
//
// Input: none
// Output:
// Purpose: closes the simulation statistics system.
//
{
int j;
FREE(SubcatchStats);
FREE(NodeStats);
FREE(LinkStats);
FREE(StorageStats);
if ( OutfallStats )
{
for ( j=0; j<Nnodes[OUTFALL]; j++ )
FREE(OutfallStats[j].totalLoad);
FREE(OutfallStats);
}
FREE(PumpStats);
}
//=============================================================================
void stats_report()
//
// Input: none
// Output: none
// Purpose: reports simulation statistics.
//
{
// --- report flow routing accuracy statistics
if ( Nobjects[LINK] > 0 && RouteModel != NO_ROUTING )
{
stats_findMaxStats();
report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS);
report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS);
report_writeSysStats(&SysStats);
}
// --- report summary statistics
statsrpt_writeReport();
}
//=============================================================================
void stats_updateSubcatchStats(int j, double rainVol, double runonVol,
double evapVol, double infilVol,
double impervVol, double pervVol,
double runoffVol, double runoff)
//
// Input: j = subcatchment index
// rainVol = rainfall + snowfall volume (ft3)
// runonVol = runon volume from other subcatchments (ft3)
// evapVol = evaporation volume (ft3)
// infilVol = infiltration volume (ft3)
// impervVol = impervious runoff volume (ft3)
// pervVol = pervious runoff volume (ft3)
// runoffVol = runoff volume (ft3)
// runoff = runoff rate (cfs)
// Output: none
// Purpose: updates totals of runoff components for a specific subcatchment.
//
{
SubcatchStats[j].precip += rainVol;
SubcatchStats[j].runon += runonVol;
SubcatchStats[j].evap += evapVol;
SubcatchStats[j].infil += infilVol;
SubcatchStats[j].runoff += runoffVol;
SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff);
SubcatchStats[j].impervRunoff += impervVol; //(5.1.013)
SubcatchStats[j].pervRunoff += pervVol; //
}
//=============================================================================
void stats_updateGwaterStats(int j, double infil, double evap, double latFlow,
double deepFlow, double theta, double waterTable,
double tStep)
{
Subcatch[j].groundwater->stats.infil += infil * tStep;
Subcatch[j].groundwater->stats.evap += evap * tStep;
Subcatch[j].groundwater->stats.latFlow += latFlow * tStep;
Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep;
Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep;
Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep;
Subcatch[j].groundwater->stats.finalUpperMoist = theta;
Subcatch[j].groundwater->stats.finalWaterTable = waterTable;
if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) )
{
Subcatch[j].groundwater->stats.maxFlow = latFlow;
}
}
//=============================================================================
void stats_updateMaxRunoff()
//
// Input: none
// Output: updates global variable MaxRunoffFlow
// Purpose: updates value of maximum system runoff rate.
//
{
int j;
double sysRunoff = 0.0;
for (j=0; j<Nobjects[SUBCATCH]; j++) sysRunoff += Subcatch[j].newRunoff;
MaxRunoffFlow = MAX(MaxRunoffFlow, sysRunoff);
}
//=============================================================================
void stats_updateMaxNodeDepth(int j, double depth)
//
// Input: j = node index
// depth = water depth at node at current reporting time (ft)
// Output: none
// Purpose: updates a node's maximum depth recorded at reporting times.
//
{
if ( NodeStats != NULL )
NodeStats[j].maxRptDepth = MAX(NodeStats[j].maxRptDepth, depth);
}
//=============================================================================
void stats_updateFlowStats(double tStep, DateTime aDate, int stepCount,
int steadyState)
//
// Input: tStep = routing time step (sec)
// aDate = current date/time
// stepCount = # steps required to solve routing at current time period
// steadyState = TRUE if steady flow conditions exist
// Output: none
// Purpose: updates various flow routing statistics at current time period.
//
{
int j;
// --- update stats only after reporting period begins
if ( aDate < ReportStart ) return;
SysOutfallFlow = 0.0;
// --- update node & link stats
#pragma omp parallel num_threads(NumThreads)
{
#pragma omp for
for ( j=0; j<Nobjects[NODE]; j++ )
stats_updateNodeStats(j, tStep, aDate);
#pragma omp for
for ( j=0; j<Nobjects[LINK]; j++ )
stats_updateLinkStats(j, tStep, aDate);
}
// --- update count of times in steady state
SysStats.steadyStateCount += steadyState;
// --- update time step stats if not in steady state
if ( steadyState == FALSE )
{
// --- skip initial time step for min. value)
if ( OldRoutingTime > 0 )
{
SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep);
}
SysStats.avgTimeStep += tStep;
SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep);
// --- update iteration step count stats
SysStats.avgStepCount += stepCount;
}
// --- update max. system outfall flow
MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow);
}
//=============================================================================
void stats_updateCriticalTimeCount(int node, int link)
//
// Input: node = node index
// link = link index
// Output: none
// Purpose: updates count of times a node or link was time step-critical.
//
{
if ( node >= 0 ) NodeStats[node].timeCourantCritical += 1.0;
else if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0;
}
//=============================================================================
void stats_updateNodeStats(int j, double tStep, DateTime aDate)
//
// Input: j = node index
// tStep = routing time step (sec)
// aDate = current date/time
// Output: none
// Purpose: updates flow statistics for a node.
//
{
int k, p;
double newVolume = Node[j].newVolume;
double newDepth = Node[j].newDepth;
double yCrown = Node[j].crownElev - Node[j].invertElev;
int canPond = (AllowPonding && Node[j].pondedArea > 0.0);
// --- update depth statistics
NodeStats[j].avgDepth += newDepth;
if ( newDepth > NodeStats[j].maxDepth )
{
NodeStats[j].maxDepth = newDepth;
NodeStats[j].maxDepthDate = aDate;
}
// --- update flooding, ponding, and surcharge statistics
if ( Node[j].type != OUTFALL )
{
if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 )
{
NodeStats[j].timeFlooded += tStep;
NodeStats[j].volFlooded += Node[j].overflow * tStep;
if ( canPond ) NodeStats[j].maxPondedVol =
MAX(NodeStats[j].maxPondedVol,
(newVolume - Node[j].fullVolume));
}
// --- for dynamic wave routing, classify a node as //(5.1.013)
// surcharged if its water level exceeds its crown elev.
if (RouteModel == DW) //(5.1.013)
{
if ((Node[j].type != STORAGE || Node[j].surDepth > 0.0) && //(5.1.013)
newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev)
{
NodeStats[j].timeSurcharged += tStep;
}
}
}
// --- update storage statistics
if ( Node[j].type == STORAGE )
{
k = Node[j].subIndex;
StorageStats[k].avgVol += newVolume;
StorageStats[k].evapLosses +=
Storage[Node[j].subIndex].evapLoss;
StorageStats[k].exfilLosses +=
Storage[Node[j].subIndex].exfilLoss;
newVolume = MIN(newVolume, Node[j].fullVolume);
if ( newVolume > StorageStats[k].maxVol )
{
StorageStats[k].maxVol = newVolume;
StorageStats[k].maxVolDate = aDate;
}
StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow);
}
// --- update outfall statistics
if ( Node[j].type == OUTFALL )
{
k = Node[j].subIndex;
if ( Node[j].inflow >= MIN_RUNOFF_FLOW )
{
OutfallStats[k].avgFlow += Node[j].inflow;
OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow);
OutfallStats[k].totalPeriods++;
}
for (p=0; p<Nobjects[POLLUT]; p++)
{
OutfallStats[k].totalLoad[p] += Node[j].inflow *
Node[j].newQual[p] * tStep;
}
SysOutfallFlow += Node[j].inflow;
}
// --- update inflow statistics
NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) *
0.5 * tStep );
if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) )
NodeStats[j].maxLatFlow = Node[j].newLatFlow;
if ( Node[j].inflow > NodeStats[j].maxInflow )
{
NodeStats[j].maxInflow = Node[j].inflow;
NodeStats[j].maxInflowDate = aDate;
}
// --- update overflow statistics
if ( Node[j].overflow > NodeStats[j].maxOverflow )
{
NodeStats[j].maxOverflow = Node[j].overflow;
NodeStats[j].maxOverflowDate = aDate;
}
}
//=============================================================================
void stats_updateLinkStats(int j, double tStep, DateTime aDate)
//
// Input: j = link index
// tStep = routing time step (sec)
// aDate = current date/time
// Output: none
// Purpose: updates flow statistics for a link.
//
{
int k;
double q, v;
double dq;
// --- update max. flow
dq = Link[j].newFlow - Link[j].oldFlow;
q = fabs(Link[j].newFlow);
if ( q > LinkStats[j].maxFlow )
{
LinkStats[j].maxFlow = q;
LinkStats[j].maxFlowDate = aDate;
}
// --- update max. velocity
v = link_getVelocity(j, q, Link[j].newDepth);
if ( v > LinkStats[j].maxVeloc )
{
LinkStats[j].maxVeloc = v;
}
// --- update max. depth
if ( Link[j].newDepth > LinkStats[j].maxDepth )
{
LinkStats[j].maxDepth = Link[j].newDepth;
}
if ( Link[j].type == PUMP )
{
if ( q >= Link[j].qFull )
LinkStats[j].timeFullFlow += tStep;
if ( q > MIN_RUNOFF_FLOW )
{
k = Link[j].subIndex;
PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q);
PumpStats[k].maxFlow = LinkStats[j].maxFlow;
PumpStats[k].avgFlow += q;
PumpStats[k].volume += q*tStep;
PumpStats[k].utilized += tStep;
PumpStats[k].energy += link_getPower(j)*tStep/3600.0;
if ( Link[j].flowClass == DN_DRY )
PumpStats[k].offCurveLow += tStep;
if ( Link[j].flowClass == UP_DRY )
PumpStats[k].offCurveHigh += tStep;
if ( Link[j].oldFlow < MIN_RUNOFF_FLOW )
PumpStats[k].startUps++;
PumpStats[k].totalPeriods++;
LinkStats[j].timeSurcharged += tStep;
LinkStats[j].timeFullUpstream += tStep;
LinkStats[j].timeFullDnstream += tStep;
}
}
else if ( Link[j].type == CONDUIT )
{
// --- update time under normal flow & inlet control
if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep;
if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep;
// --- update flow classification distribution
k = Link[j].flowClass;
if ( k >= 0 && k < MAX_FLOW_CLASSES )
{
++LinkStats[j].timeInFlowClass[k];
}
// --- update time conduit is full
k = Link[j].subIndex;
if ( q >= Link[j].qFull * (double)Conduit[k].barrels )
LinkStats[j].timeFullFlow += tStep;
if ( Conduit[k].capacityLimited )
LinkStats[j].timeCapacityLimited += tStep;
switch (Conduit[k].fullState)
{
case ALL_FULL:
LinkStats[j].timeSurcharged += tStep;
LinkStats[j].timeFullUpstream += tStep;
LinkStats[j].timeFullDnstream += tStep;
break;
case UP_FULL:
LinkStats[j].timeFullUpstream += tStep;
break;
case DN_FULL:
LinkStats[j].timeFullDnstream += tStep;
}
}
// --- update flow turn count
k = LinkStats[j].flowTurnSign;
LinkStats[j].flowTurnSign = SGN(dq);
if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 )
LinkStats[j].flowTurns++;
}
//=============================================================================
void stats_findMaxStats()
//
// Input: none
// Output: none
// Purpose: finds nodes & links with highest mass balance errors
// & highest times Courant time-step critical.
//
{
int j;
double x;
// --- initialize max. stats arrays
for (j=0; j<MAX_STATS; j++)
{
MaxMassBalErrs[j].objType = NODE;
MaxMassBalErrs[j].index = -1;
MaxMassBalErrs[j].value = -1.0;
MaxCourantCrit[j].index = -1;
MaxCourantCrit[j].value = -1.0;
MaxFlowTurns[j].index = -1;
MaxFlowTurns[j].value = -1.0;
}
// --- find links with most flow turns
if ( StepCount > 2 )
{
for (j=0; j<Nobjects[LINK]; j++)
{
x = 100.0 * LinkStats[j].flowTurns / (2./3.*(StepCount-2));
stats_updateMaxStats(MaxFlowTurns, LINK, j, x);
}
}
// --- find nodes with largest mass balance errors
for (j=0; j<Nobjects[NODE]; j++)
{
// --- skip terminal nodes and nodes with negligible inflow
if ( Node[j].degree <= 0 ) continue;
if ( NodeInflow[j] <= 0.1 ) continue;
// --- evaluate mass balance error
// (Note: NodeInflow & NodeOutflow include any initial and final
// stored volumes, respectively).
if ( NodeInflow[j] > 0.0 )
x = 1.0 - NodeOutflow[j] / NodeInflow[j];
else if ( NodeOutflow[j] > 0.0 ) x = -1.0;
else x = 0.0;
stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x);
}
// --- stop if not using a variable time step
if ( RouteModel != DW || CourantFactor == 0.0 ) return;
// --- find nodes most frequently Courant critical
if ( StepCount == 0 ) return;
for (j=0; j<Nobjects[NODE]; j++)
{
x = NodeStats[j].timeCourantCritical / StepCount;
stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x);
}
// --- find links most frequently Courant critical
for (j=0; j<Nobjects[LINK]; j++)
{
x = LinkStats[j].timeCourantCritical / StepCount;
stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x);
}
}
//=============================================================================
void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x)
//
// Input: maxStats[] = array of critical statistics values
// i = object category (NODE or LINK)
// j = object index
// x = value of statistic for the object
// Output: none
// Purpose: updates the collection of most critical statistics
//
{
int k;
TMaxStats maxStats1, maxStats2;
maxStats1.objType = i;
maxStats1.index = j;
maxStats1.value = x;
for (k=0; k<MAX_STATS; k++)
{
if ( fabs(maxStats1.value) > fabs(maxStats[k].value) )
{
maxStats2 = maxStats[k];
maxStats[k] = maxStats1;
maxStats1 = maxStats2;
}
}
}
|
primes.c | #include <stdio.h>
#include <omp.h>
int is_trivial_prime(int n)
{
return n <= 2;
}
int is_divisible(int n, int d)
{
return n % d == 0;
}
int is_prime(int n)
{
if (is_trivial_prime(n))
return 1;
for (int d = 3; d < n; d += 2)
if (is_divisible(n, d))
return 0;
return 1;
}
int main()
{
int count = 0;
#pragma omp parallel for reduction(+:count)
for (int n = 2; n < 200000; ++n)
if (is_prime(n))
++count;
printf("Primes found: %d\n", count);
return 0;
}
|
fdtd-2d.pluto.par.l1tile.c |
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define tmax T
#define nx N
#define ny N
double ex[nx][ny +1];
double ey[nx +1][ny];
double hz[nx][ny];
void init_arrays()
{
int i, j;
for (i=0; i<nx+1; i++) {
for (j=0; j<ny; j++) {
ey[i][j] = 0;
}
}
for (i=0; i<nx; i++) {
for (j=0; j<ny+1; j++) {
ex[i][j] = 0;
}
}
for (j=0; j<ny; j++) {
ey[0][j] = ((double)j)/ny;
}
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
hz[i][j] = 0;
}
}
}
double rtclock()
{
struct timezone tzp;
struct timeval tp;
int stat;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
int main()
{
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
#include <math.h>
#include <assert.h>
#include <omp.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
int c1, c2, c3, c4, c5, c6;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
for (c1=-1;c1<=floord(2*tmax+ny-2,32);c1++) {
lb1=max(max(ceild(32*c1-tmax+1,32),0),ceild(32*c1-31,64));
ub1=min(min(floord(32*c1+31,32),floord(32*c1+ny+31,64)),floord(tmax+ny-1,32));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(max(max(max(max(max(max(max(max(max(ceild(32*c2-ny-30,32),0),ceild(32*c1-32*c2-31*ny-899,992)),ceild(64*c1-96*c2-61,32)),ceild(1024*c1-2016*c2-30*nx-931,32)),ceild(992*c1-1952*c2-30*nx-ny-899,32)),ceild(32*c1-1024*c2-30*nx-931,32)),ceild(32*c1-32*c2-ny-29,32)),ceild(32*c1-32*c2-31,32)),ceild(32*c1-1024*c2-1891,992)),ceild(32*c1-992*c2-30*nx-ny-899,32));c3<=min(min(min(floord(tmax+nx-1,32),floord(32*c1-32*c2+nx+31,32)),floord(32*c2+nx+30,32)),c1+31*c2+nx+30);c3++) {
if ((c1 <= floord(32*c2+32*c3-nx,32)) && (c2 <= floord(32*c3-nx+ny,32)) && (c3 >= ceild(nx,32))) {
for (c5=max(32*c3-nx+1,32*c2);c5<=min(32*c3-nx+ny,32*c2+31);c5++) {
{hz[nx-1][-32*c3+c5+nx-1]=hz[nx-1][-32*c3+c5+nx-1]-((double)(7))/10*(ey[1+nx-1][-32*c3+c5+nx-1]+ex[nx-1][1+-32*c3+c5+nx-1]-ex[nx-1][-32*c3+c5+nx-1]-ey[nx-1][-32*c3+c5+nx-1]);} ;
}
}
if ((c1 <= floord(64*c2-ny,32)) && (c2 >= max(ceild(ny,32),ceild(32*c3-nx+ny+1,32)))) {
for (c6=max(32*c2-ny+1,32*c3);c6<=min(32*c2+nx-ny,32*c3+31);c6++) {
{hz[-32*c2+c6+ny-1][ny-1]=hz[-32*c2+c6+ny-1][ny-1]-((double)(7))/10*(ey[1+-32*c2+c6+ny-1][ny-1]+ex[-32*c2+c6+ny-1][1+ny-1]-ex[-32*c2+c6+ny-1][ny-1]-ey[-32*c2+c6+ny-1][ny-1]);} ;
}
}
if ((c1 == c2+c3) && (nx >= 2)) {
for (c4=max(max(0,32*c3),32*c2-ny+1);c4<=min(min(min(32*c2-1,32*c3-nx+31),tmax-1),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
for (c6=c4+1;c6<=c4+nx;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (nx >= 2) && (ny >= 2)) {
for (c4=max(max(32*c2,0),32*c3);c4<=min(min(32*c3-nx+31,tmax-1),32*c2-ny+31);c4++) {
{ey[0][0]=c4;} ;
for (c6=c4+1;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=c4+ny-1;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
for (c6=c4+1;c6<=c4+nx;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (nx >= 2)) {
for (c4=max(max(0,32*c3),32*c2-ny+32);c4<=min(min(tmax-1,32*c2-1),32*c3-nx+31);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(max(32*c3-nx+32,0),32*c3),32*c2-ny+1);c4<=min(min(min(32*c2-1,tmax-1),32*c2-ny+31),32*c3+30);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
for (c6=c4+1;c6<=32*c3+31;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
}
if ((c1 == c2+c3) && (nx >= 2)) {
for (c4=max(max(max(32*c2,0),32*c3),32*c2-ny+32);c4<=min(min(tmax-1,32*c2+30),32*c3-nx+31);c4++) {
{ey[0][0]=c4;} ;
for (c6=c4+1;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
}
}
if ((c1 == c2+c3) && (ny >= 2)) {
for (c4=max(max(max(32*c2,32*c3-nx+32),0),32*c3);c4<=min(min(tmax-1,32*c2-ny+31),32*c3+30);c4++) {
{ey[0][0]=c4;} ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=c4+ny-1;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
for (c6=c4+1;c6<=32*c3+31;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(max(0,32*c3),32*c3-nx+32),32*c2-ny+32);c4<=min(min(tmax-1,32*c3+30),32*c2-1);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
}
}
if (c1 == c2+c3) {
for (c4=max(max(max(max(32*c2,32*c3-nx+32),0),32*c3),32*c2-ny+32);c4<=min(min(tmax-1,32*c3+30),32*c2+30);c4++) {
{ey[0][0]=c4;} ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
for (c6=c4+1;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
}
}
if ((c1 == c2+c3) && (nx >= 2) && (ny == 1)) {
for (c4=max(max(0,32*c3),32*c2);c4<=min(min(tmax-1,32*c2+30),32*c3+30);c4++) {
{ey[0][0]=c4;} ;
for (c6=c4+1;c6<=min(c4+nx-1,32*c3+31);c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c6=c4+1;c6<=min(32*c3+31,c4+nx);c6++) {
{hz[-c4+c6-1][0]=hz[-c4+c6-1][0]-((double)(7))/10*(ey[1+-c4+c6-1][0]+ex[-c4+c6-1][1+0]-ex[-c4+c6-1][0]-ey[-c4+c6-1][0]);} ;
}
}
}
if ((c1 == c2+c3) && (nx == 1)) {
for (c4=max(max(0,32*c3),32*c2-ny+1);c4<=min(min(min(32*c2-1,tmax-1),32*c2-ny+31),32*c3+30);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
{hz[0][-c4+c5-1]=hz[0][-c4+c5-1]-((double)(7))/10*(ey[1+0][-c4+c5-1]+ex[0][1+-c4+c5-1]-ex[0][-c4+c5-1]-ey[0][-c4+c5-1]);} ;
}
{hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]);} ;
}
}
if ((c1 == c2+c3) && (nx == 1) && (ny >= 2)) {
for (c4=max(max(32*c2,0),32*c3);c4<=min(min(tmax-1,32*c2-ny+31),32*c3+30);c4++) {
{ey[0][0]=c4;} ;
for (c5=c4+1;c5<=c4+ny-1;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
{hz[0][-c4+c5-1]=hz[0][-c4+c5-1]-((double)(7))/10*(ey[1+0][-c4+c5-1]+ex[0][1+-c4+c5-1]-ex[0][-c4+c5-1]-ey[0][-c4+c5-1]);} ;
}
{hz[0][ny-1]=hz[0][ny-1]-((double)(7))/10*(ey[1+0][ny-1]+ex[0][1+ny-1]-ex[0][ny-1]-ey[0][ny-1]);} ;
}
}
if ((c1 == c2+c3) && (nx == 1)) {
for (c4=max(max(0,32*c3),32*c2-ny+32);c4<=min(min(tmax-1,32*c3+30),32*c2-1);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
{hz[0][-c4+c5-1]=hz[0][-c4+c5-1]-((double)(7))/10*(ey[1+0][-c4+c5-1]+ex[0][1+-c4+c5-1]-ex[0][-c4+c5-1]-ey[0][-c4+c5-1]);} ;
}
}
}
if ((c1 == c2+c3) && (nx == 1)) {
for (c4=max(max(max(32*c2,0),32*c3),32*c2-ny+32);c4<=min(min(tmax-1,32*c3+30),32*c2+30);c4++) {
{ey[0][0]=c4;} ;
for (c5=c4+1;c5<=32*c2+31;c5++) {
{ey[0][-c4+c5]=c4;} ;
{ex[0][-c4+c5]=ex[0][-c4+c5]-((double)(1))/2*(hz[0][-c4+c5]-hz[0][-c4+c5-1]);} ;
{hz[0][-c4+c5-1]=hz[0][-c4+c5-1]-((double)(7))/10*(ey[1+0][-c4+c5-1]+ex[0][1+-c4+c5-1]-ex[0][-c4+c5-1]-ey[0][-c4+c5-1]);} ;
}
}
}
if ((c1 == c2+c3) && (nx == 1) && (ny == 1)) {
for (c4=max(max(0,32*c3),32*c2);c4<=min(min(tmax-1,32*c2+30),32*c3+30);c4++) {
{ey[0][0]=c4;} ;
{hz[0][0]=hz[0][0]-((double)(7))/10*(ey[1+0][0]+ex[0][1+0]-ex[0][0]-ey[0][0]);} ;
}
}
for (c4=max(max(max(0,32*c3-nx+1),32*c2-ny+1),32*c1-32*c2);c4<=min(min(min(min(min(32*c2-1,32*c3-nx+31),32*c3-1),tmax-1),32*c1-32*c2+31),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
for (c6=32*c3;c6<=c4+nx;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
if (ny >= 2) {
for (c4=max(max(max(32*c2,0),32*c3-nx+1),32*c1-32*c2);c4<=min(min(min(min(32*c3-nx+31,32*c3-1),tmax-1),32*c1-32*c2+31),32*c2-ny+31);c4++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
for (c6=32*c3;c6<=c4+nx;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
}
for (c4=max(max(max(0,32*c1-32*c2),32*c3-nx+1),32*c2-ny+32);c4<=min(min(min(min(tmax-1,32*c3-1),32*c1-32*c2+31),32*c2-1),32*c3-nx+31);c4++) {
for (c5=32*c2;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
}
for (c4=max(max(max(32*c3-nx+32,0),32*c2-ny+1),32*c1-32*c2);c4<=min(min(min(min(32*c2-1,32*c3-1),tmax-1),32*c1-32*c2+31),32*c2-ny+31);c4++) {
for (c5=32*c2;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
for (c6=32*c3;c6<=32*c3+31;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
for (c4=max(max(max(max(32*c2,0),32*c1-32*c2),32*c3-nx+1),32*c2-ny+32);c4<=min(min(min(min(tmax-1,32*c3-1),32*c1-32*c2+31),32*c2+30),32*c3-nx+31);c4++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=c4+nx-1;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
{hz[nx-1][-c4+c5-1]=hz[nx-1][-c4+c5-1]-((double)(7))/10*(ey[1+nx-1][-c4+c5-1]+ex[nx-1][1+-c4+c5-1]-ex[nx-1][-c4+c5-1]-ey[nx-1][-c4+c5-1]);} ;
}
}
if (ny >= 2) {
for (c4=max(max(max(32*c2,32*c3-nx+32),0),32*c1-32*c2);c4<=min(min(min(32*c3-1,tmax-1),32*c1-32*c2+31),32*c2-ny+31);c4++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=c4+ny-1;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
for (c6=32*c3;c6<=32*c3+31;c6++) {
{hz[-c4+c6-1][ny-1]=hz[-c4+c6-1][ny-1]-((double)(7))/10*(ey[1+-c4+c6-1][ny-1]+ex[-c4+c6-1][1+ny-1]-ex[-c4+c6-1][ny-1]-ey[-c4+c6-1][ny-1]);} ;
}
}
}
for (c4=max(max(max(0,32*c1-32*c2),32*c3-nx+32),32*c2-ny+32);c4<=min(min(min(tmax-1,32*c3-1),32*c1-32*c2+31),32*c2-1);c4++) {
/*@ begin Loop(
transform UnrollJam(ufactor=4)
for (c5=32*c2;c5<=32*c2+31;c5++) {
transform UnrollJam(ufactor=4)
for (c6=32*c3;c6<=32*c3+31;c6++) {
ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);
ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);
hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);
}
}
) @*/ {
for (c5=32*c2; c5<=32*c2+28; c5=c5+4) {
for (c6=32*c3; c6<=32*c3+28; c6=c6+4) {
ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);
ey[-c4+c6][-c4+c5+1]=ey[-c4+c6][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+1]-hz[-c4+c6-1][-c4+c5+1]);
ey[-c4+c6][-c4+c5+2]=ey[-c4+c6][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+2]-hz[-c4+c6-1][-c4+c5+2]);
ey[-c4+c6][-c4+c5+3]=ey[-c4+c6][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+3]-hz[-c4+c6-1][-c4+c5+3]);
ey[-c4+c6+1][-c4+c5]=ey[-c4+c6+1][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5]-hz[-c4+c6][-c4+c5]);
ey[-c4+c6+1][-c4+c5+1]=ey[-c4+c6+1][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5+1]-hz[-c4+c6][-c4+c5+1]);
ey[-c4+c6+1][-c4+c5+2]=ey[-c4+c6+1][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5+2]-hz[-c4+c6][-c4+c5+2]);
ey[-c4+c6+1][-c4+c5+3]=ey[-c4+c6+1][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5+3]-hz[-c4+c6][-c4+c5+3]);
ey[-c4+c6+2][-c4+c5]=ey[-c4+c6+2][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5]-hz[-c4+c6+1][-c4+c5]);
ey[-c4+c6+2][-c4+c5+1]=ey[-c4+c6+2][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5+1]-hz[-c4+c6+1][-c4+c5+1]);
ey[-c4+c6+2][-c4+c5+2]=ey[-c4+c6+2][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5+2]-hz[-c4+c6+1][-c4+c5+2]);
ey[-c4+c6+2][-c4+c5+3]=ey[-c4+c6+2][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5+3]-hz[-c4+c6+1][-c4+c5+3]);
ey[-c4+c6+3][-c4+c5]=ey[-c4+c6+3][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5]-hz[-c4+c6+2][-c4+c5]);
ey[-c4+c6+3][-c4+c5+1]=ey[-c4+c6+3][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5+1]-hz[-c4+c6+2][-c4+c5+1]);
ey[-c4+c6+3][-c4+c5+2]=ey[-c4+c6+3][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5+2]-hz[-c4+c6+2][-c4+c5+2]);
ey[-c4+c6+3][-c4+c5+3]=ey[-c4+c6+3][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5+3]-hz[-c4+c6+2][-c4+c5+3]);
ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);
ex[-c4+c6][-c4+c5+1]=ex[-c4+c6][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+1]-hz[-c4+c6][-c4+c5]);
ex[-c4+c6][-c4+c5+2]=ex[-c4+c6][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+2]-hz[-c4+c6][-c4+c5+1]);
ex[-c4+c6][-c4+c5+3]=ex[-c4+c6][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+3]-hz[-c4+c6][-c4+c5+2]);
ex[-c4+c6+1][-c4+c5]=ex[-c4+c6+1][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5]-hz[-c4+c6+1][-c4+c5-1]);
ex[-c4+c6+1][-c4+c5+1]=ex[-c4+c6+1][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5+1]-hz[-c4+c6+1][-c4+c5]);
ex[-c4+c6+1][-c4+c5+2]=ex[-c4+c6+1][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5+2]-hz[-c4+c6+1][-c4+c5+1]);
ex[-c4+c6+1][-c4+c5+3]=ex[-c4+c6+1][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5+3]-hz[-c4+c6+1][-c4+c5+2]);
ex[-c4+c6+2][-c4+c5]=ex[-c4+c6+2][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5]-hz[-c4+c6+2][-c4+c5-1]);
ex[-c4+c6+2][-c4+c5+1]=ex[-c4+c6+2][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5+1]-hz[-c4+c6+2][-c4+c5]);
ex[-c4+c6+2][-c4+c5+2]=ex[-c4+c6+2][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5+2]-hz[-c4+c6+2][-c4+c5+1]);
ex[-c4+c6+2][-c4+c5+3]=ex[-c4+c6+2][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5+3]-hz[-c4+c6+2][-c4+c5+2]);
ex[-c4+c6+3][-c4+c5]=ex[-c4+c6+3][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5]-hz[-c4+c6+3][-c4+c5-1]);
ex[-c4+c6+3][-c4+c5+1]=ex[-c4+c6+3][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5+1]-hz[-c4+c6+3][-c4+c5]);
ex[-c4+c6+3][-c4+c5+2]=ex[-c4+c6+3][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5+2]-hz[-c4+c6+3][-c4+c5+1]);
ex[-c4+c6+3][-c4+c5+3]=ex[-c4+c6+3][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5+3]-hz[-c4+c6+3][-c4+c5+2]);
hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5-1]+ex[-c4+c6-1][-c4+c5]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);
hz[-c4+c6-1][-c4+c5]=hz[-c4+c6-1][-c4+c5]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5]+ex[-c4+c6-1][-c4+c5+1]-ex[-c4+c6-1][-c4+c5]-ey[-c4+c6-1][-c4+c5]);
hz[-c4+c6-1][-c4+c5+1]=hz[-c4+c6-1][-c4+c5+1]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5+1]+ex[-c4+c6-1][-c4+c5+2]-ex[-c4+c6-1][-c4+c5+1]-ey[-c4+c6-1][-c4+c5+1]);
hz[-c4+c6-1][-c4+c5+2]=hz[-c4+c6-1][-c4+c5+2]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5+2]+ex[-c4+c6-1][-c4+c5+3]-ex[-c4+c6-1][-c4+c5+2]-ey[-c4+c6-1][-c4+c5+2]);
hz[-c4+c6][-c4+c5-1]=hz[-c4+c6][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6+1][-c4+c5-1]+ex[-c4+c6][-c4+c5]-ex[-c4+c6][-c4+c5-1]-ey[-c4+c6][-c4+c5-1]);
hz[-c4+c6][-c4+c5]=hz[-c4+c6][-c4+c5]-0.1*((double)(7))*(ey[-c4+c6+1][-c4+c5]+ex[-c4+c6][-c4+c5+1]-ex[-c4+c6][-c4+c5]-ey[-c4+c6][-c4+c5]);
hz[-c4+c6][-c4+c5+1]=hz[-c4+c6][-c4+c5+1]-0.1*((double)(7))*(ey[-c4+c6+1][-c4+c5+1]+ex[-c4+c6][-c4+c5+2]-ex[-c4+c6][-c4+c5+1]-ey[-c4+c6][-c4+c5+1]);
hz[-c4+c6][-c4+c5+2]=hz[-c4+c6][-c4+c5+2]-0.1*((double)(7))*(ey[-c4+c6+1][-c4+c5+2]+ex[-c4+c6][-c4+c5+3]-ex[-c4+c6][-c4+c5+2]-ey[-c4+c6][-c4+c5+2]);
hz[-c4+c6+1][-c4+c5-1]=hz[-c4+c6+1][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6+2][-c4+c5-1]+ex[-c4+c6+1][-c4+c5]-ex[-c4+c6+1][-c4+c5-1]-ey[-c4+c6+1][-c4+c5-1]);
hz[-c4+c6+1][-c4+c5]=hz[-c4+c6+1][-c4+c5]-0.1*((double)(7))*(ey[-c4+c6+2][-c4+c5]+ex[-c4+c6+1][-c4+c5+1]-ex[-c4+c6+1][-c4+c5]-ey[-c4+c6+1][-c4+c5]);
hz[-c4+c6+1][-c4+c5+1]=hz[-c4+c6+1][-c4+c5+1]-0.1*((double)(7))*(ey[-c4+c6+2][-c4+c5+1]+ex[-c4+c6+1][-c4+c5+2]-ex[-c4+c6+1][-c4+c5+1]-ey[-c4+c6+1][-c4+c5+1]);
hz[-c4+c6+1][-c4+c5+2]=hz[-c4+c6+1][-c4+c5+2]-0.1*((double)(7))*(ey[-c4+c6+2][-c4+c5+2]+ex[-c4+c6+1][-c4+c5+3]-ex[-c4+c6+1][-c4+c5+2]-ey[-c4+c6+1][-c4+c5+2]);
hz[-c4+c6+2][-c4+c5-1]=hz[-c4+c6+2][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6+3][-c4+c5-1]+ex[-c4+c6+2][-c4+c5]-ex[-c4+c6+2][-c4+c5-1]-ey[-c4+c6+2][-c4+c5-1]);
hz[-c4+c6+2][-c4+c5]=hz[-c4+c6+2][-c4+c5]-0.1*((double)(7))*(ey[-c4+c6+3][-c4+c5]+ex[-c4+c6+2][-c4+c5+1]-ex[-c4+c6+2][-c4+c5]-ey[-c4+c6+2][-c4+c5]);
hz[-c4+c6+2][-c4+c5+1]=hz[-c4+c6+2][-c4+c5+1]-0.1*((double)(7))*(ey[-c4+c6+3][-c4+c5+1]+ex[-c4+c6+2][-c4+c5+2]-ex[-c4+c6+2][-c4+c5+1]-ey[-c4+c6+2][-c4+c5+1]);
hz[-c4+c6+2][-c4+c5+2]=hz[-c4+c6+2][-c4+c5+2]-0.1*((double)(7))*(ey[-c4+c6+3][-c4+c5+2]+ex[-c4+c6+2][-c4+c5+3]-ex[-c4+c6+2][-c4+c5+2]-ey[-c4+c6+2][-c4+c5+2]);
}
for (; c6<=32*c3+31; c6=c6+1) {
ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);
ey[-c4+c6][-c4+c5+1]=ey[-c4+c6][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+1]-hz[-c4+c6-1][-c4+c5+1]);
ey[-c4+c6][-c4+c5+2]=ey[-c4+c6][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+2]-hz[-c4+c6-1][-c4+c5+2]);
ey[-c4+c6][-c4+c5+3]=ey[-c4+c6][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+3]-hz[-c4+c6-1][-c4+c5+3]);
ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);
ex[-c4+c6][-c4+c5+1]=ex[-c4+c6][-c4+c5+1]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+1]-hz[-c4+c6][-c4+c5]);
ex[-c4+c6][-c4+c5+2]=ex[-c4+c6][-c4+c5+2]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+2]-hz[-c4+c6][-c4+c5+1]);
ex[-c4+c6][-c4+c5+3]=ex[-c4+c6][-c4+c5+3]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5+3]-hz[-c4+c6][-c4+c5+2]);
hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5-1]+ex[-c4+c6-1][-c4+c5]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);
hz[-c4+c6-1][-c4+c5]=hz[-c4+c6-1][-c4+c5]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5]+ex[-c4+c6-1][-c4+c5+1]-ex[-c4+c6-1][-c4+c5]-ey[-c4+c6-1][-c4+c5]);
hz[-c4+c6-1][-c4+c5+1]=hz[-c4+c6-1][-c4+c5+1]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5+1]+ex[-c4+c6-1][-c4+c5+2]-ex[-c4+c6-1][-c4+c5+1]-ey[-c4+c6-1][-c4+c5+1]);
hz[-c4+c6-1][-c4+c5+2]=hz[-c4+c6-1][-c4+c5+2]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5+2]+ex[-c4+c6-1][-c4+c5+3]-ex[-c4+c6-1][-c4+c5+2]-ey[-c4+c6-1][-c4+c5+2]);
}
}
for (; c5<=32*c2+31; c5=c5+1) {
{
for (c6=32*c3; c6<=32*c3+28; c6=c6+4) {
ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);
ey[-c4+c6+1][-c4+c5]=ey[-c4+c6+1][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5]-hz[-c4+c6][-c4+c5]);
ey[-c4+c6+2][-c4+c5]=ey[-c4+c6+2][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5]-hz[-c4+c6+1][-c4+c5]);
ey[-c4+c6+3][-c4+c5]=ey[-c4+c6+3][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5]-hz[-c4+c6+2][-c4+c5]);
ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);
ex[-c4+c6+1][-c4+c5]=ex[-c4+c6+1][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+1][-c4+c5]-hz[-c4+c6+1][-c4+c5-1]);
ex[-c4+c6+2][-c4+c5]=ex[-c4+c6+2][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+2][-c4+c5]-hz[-c4+c6+2][-c4+c5-1]);
ex[-c4+c6+3][-c4+c5]=ex[-c4+c6+3][-c4+c5]-0.5*((double)(1))*(hz[-c4+c6+3][-c4+c5]-hz[-c4+c6+3][-c4+c5-1]);
hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6][-c4+c5-1]+ex[-c4+c6-1][-c4+c5]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);
hz[-c4+c6][-c4+c5-1]=hz[-c4+c6][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6+1][-c4+c5-1]+ex[-c4+c6][-c4+c5]-ex[-c4+c6][-c4+c5-1]-ey[-c4+c6][-c4+c5-1]);
hz[-c4+c6+1][-c4+c5-1]=hz[-c4+c6+1][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6+2][-c4+c5-1]+ex[-c4+c6+1][-c4+c5]-ex[-c4+c6+1][-c4+c5-1]-ey[-c4+c6+1][-c4+c5-1]);
hz[-c4+c6+2][-c4+c5-1]=hz[-c4+c6+2][-c4+c5-1]-0.1*((double)(7))*(ey[-c4+c6+3][-c4+c5-1]+ex[-c4+c6+2][-c4+c5]-ex[-c4+c6+2][-c4+c5-1]-ey[-c4+c6+2][-c4+c5-1]);
}
for (; c6<=32*c3+31; c6=c6+1) {
ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);
ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);
hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);
}
}
}
}
/*@ end @*/
}
for (c4=max(max(max(max(32*c2,32*c3-nx+32),0),32*c1-32*c2),32*c2-ny+32);c4<=min(min(min(tmax-1,32*c3-1),32*c1-32*c2+31),32*c2+30);c4++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c5=c4+1;c5<=32*c2+31;c5++) {
for (c6=32*c3;c6<=32*c3+31;c6++) {
{ey[-c4+c6][-c4+c5]=ey[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6-1][-c4+c5]);} ;
{ex[-c4+c6][-c4+c5]=ex[-c4+c6][-c4+c5]-((double)(1))/2*(hz[-c4+c6][-c4+c5]-hz[-c4+c6][-c4+c5-1]);} ;
{hz[-c4+c6-1][-c4+c5-1]=hz[-c4+c6-1][-c4+c5-1]-((double)(7))/10*(ey[1+-c4+c6-1][-c4+c5-1]+ex[-c4+c6-1][1+-c4+c5-1]-ex[-c4+c6-1][-c4+c5-1]-ey[-c4+c6-1][-c4+c5-1]);} ;
}
}
}
if (ny == 1) {
for (c4=max(max(max(0,32*c3-nx+1),32*c1-32*c2),32*c2);c4<=min(min(min(32*c3-1,tmax-1),32*c1-32*c2+31),32*c2+30);c4++) {
for (c6=32*c3;c6<=min(c4+nx-1,32*c3+31);c6++) {
{ey[-c4+c6][0]=ey[-c4+c6][0]-((double)(1))/2*(hz[-c4+c6][0]-hz[-c4+c6-1][0]);} ;
}
for (c6=32*c3;c6<=min(32*c3+31,c4+nx);c6++) {
{hz[-c4+c6-1][0]=hz[-c4+c6-1][0]-((double)(7))/10*(ey[1+-c4+c6-1][0]+ex[-c4+c6-1][1+0]-ex[-c4+c6-1][0]-ey[-c4+c6-1][0]);} ;
}
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(64*c3-1,32),floord(32*c3+tmax-32,32))) && (nx >= 2) && (ny == 1)) {
{ey[0][0]=32*c1-32*c3+31;} ;
for (c6=32*c1-32*c3+32;c6<=min(32*c1-32*c3+nx+30,32*c3+31);c6++) {
{ey[-32*c1+32*c3+c6-31][0]=ey[-32*c1+32*c3+c6-31][0]-((double)(1))/2*(hz[-32*c1+32*c3+c6-31][0]-hz[-32*c1+32*c3+c6-31 -1][0]);} ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(64*c3-1,32),floord(32*c3+tmax-32,32))) && (nx >= 2) && (ny >= 2)) {
{ey[0][0]=32*c1-32*c3+31;} ;
for (c6=32*c1-32*c3+32;c6<=min(32*c1-32*c3+nx+30,32*c3+31);c6++) {
{ey[-32*c1+32*c3+c6-31][0]=ey[-32*c1+32*c3+c6-31][0]-((double)(1))/2*(hz[-32*c1+32*c3+c6-31][0]-hz[-32*c1+32*c3+c6-31 -1][0]);} ;
}
}
if ((-c1 == -c2-c3) && (c1 >= ceild(64*c2-31,32)) && (c1 <= min(floord(32*c2+tmax-32,32),floord(64*c2-1,32))) && (nx == 1) && (ny >= 2)) {
{ey[0][0]=32*c1-32*c2+31;} ;
for (c5=32*c1-32*c2+32;c5<=min(32*c2+31,32*c1-32*c2+ny+30);c5++) {
{ey[0][-32*c1+32*c2+c5-31]=32*c1-32*c2+31;} ;
{ex[0][-32*c1+32*c2+c5-31]=ex[0][-32*c1+32*c2+c5-31]-((double)(1))/2*(hz[0][-32*c1+32*c2+c5-31]-hz[0][-32*c1+32*c2+c5-31 -1]);} ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c2+tmax-32,32),2*c2-1)) && (nx == 1)) {
for (c5=32*c2;c5<=min(32*c2+31,32*c1-32*c2+ny+30);c5++) {
{ey[0][-32*c1+32*c2+c5-31]=32*c1-32*c2+31;} ;
{ex[0][-32*c1+32*c2+c5-31]=ex[0][-32*c1+32*c2+c5-31]-((double)(1))/2*(hz[0][-32*c1+32*c2+c5-31]-hz[0][-32*c1+32*c2+c5-31 -1]);} ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c3+tmax-32,32),2*c3)) && (nx == 1) && (ny == 1)) {
{ey[0][0]=32*c1-32*c3+31;} ;
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c2+tmax-32,32),floord(64*c2-1,32))) && (nx == 1) && (ny == 1)) {
{ey[0][0]=32*c1-32*c2+31;} ;
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c3+tmax-32,32),2*c3)) && (nx == 1) && (ny >= 2)) {
{ey[0][0]=32*c1-32*c3+31;} ;
}
if ((-c1 == -c2-c3) && (c1 >= ceild(64*c2-31,32)) && (c1 <= min(floord(32*c2+tmax-32,32),floord(64*c2-1,32))) && (nx >= 2) && (ny >= 2)) {
{ey[0][0]=32*c1-32*c2+31;} ;
for (c5=32*c1-32*c2+32;c5<=min(32*c2+31,32*c1-32*c2+ny+30);c5++) {
{ey[0][-32*c1+32*c2+c5-31]=32*c1-32*c2+31;} ;
{ex[0][-32*c1+32*c2+c5-31]=ex[0][-32*c1+32*c2+c5-31]-((double)(1))/2*(hz[0][-32*c1+32*c2+c5-31]-hz[0][-32*c1+32*c2+c5-31 -1]);} ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c2+tmax-32,32),2*c2-1)) && (nx >= 2)) {
for (c5=32*c2;c5<=min(32*c2+31,32*c1-32*c2+ny+30);c5++) {
{ey[0][-32*c1+32*c2+c5-31]=32*c1-32*c2+31;} ;
{ex[0][-32*c1+32*c2+c5-31]=ex[0][-32*c1+32*c2+c5-31]-((double)(1))/2*(hz[0][-32*c1+32*c2+c5-31]-hz[0][-32*c1+32*c2+c5-31 -1]);} ;
}
}
if ((-c1 == -c2-c3) && (c1 <= min(floord(32*c2+tmax-32,32),2*c2)) && (nx >= 2) && (ny == 1)) {
{ey[0][0]=32*c1-32*c2+31;} ;
}
if ((-c1 == -2*c2) && (-c1 == -2*c3) && (c1 <= floord(tmax-32,16)) && (nx >= 2) && (ny >= 2)) {
if (c1%2 == 0) {
{ey[0][0]=16*c1+31;} ;
}
}
if ((c1 >= 2*c2) && (c2 <= min(floord(tmax-32,32),c3-1)) && (ny == 1)) {
for (c6=32*c3;c6<=min(32*c2+nx+30,32*c3+31);c6++) {
{ey[-32*c2+c6-31][0]=ey[-32*c2+c6-31][0]-((double)(1))/2*(hz[-32*c2+c6-31][0]-hz[-32*c2+c6-31 -1][0]);} ;
}
}
if ((c1 >= 2*c2) && (c2 <= min(floord(tmax-32,32),c3-1)) && (ny >= 2)) {
for (c6=32*c3;c6<=min(32*c2+nx+30,32*c3+31);c6++) {
{ey[-32*c2+c6-31][0]=ey[-32*c2+c6-31][0]-((double)(1))/2*(hz[-32*c2+c6-31][0]-hz[-32*c2+c6-31 -1][0]);} ;
}
}
}
}
}
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
#ifndef TEST
printf("%f\n", annot_t_total);
#else
{
int i,j;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
if (j%100==0)
printf("\n");
printf("%f ",hz[i][j]);
}
printf("\n");
}
}
#endif
return ((int) hz[0][0]);
}
|
GB_unop__minv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_int32_int32
// op(A') function: GB_unop_tran__minv_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 32) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__minv_int32_int32
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 32) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = GB_IMINV_SIGNED (z, 32) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__minv_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ge_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_fp32)
// A*D function (colscale): GB (_AxD__ge_fp32)
// D*A function (rowscale): GB (_DxB__ge_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_fp32)
// C=scalar+B GB (_bind1st__ge_fp32)
// C=scalar+B' GB (_bind1st_tran__ge_fp32)
// C=A+scalar GB (_bind2nd__ge_fp32)
// C=A'+scalar GB (_bind2nd_tran__ge_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_FP32 || GxB_NO_GE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ge_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__ge_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ge_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ge_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ge_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_places.c | #include <omp.h>
#define N 1000000000
int main() {
long int n = N;
int *a = (int *)malloc(n*sizeof(int));
#pragma omp teams distribute parallel for
{
for (long int i = 0; i < n; i++) {
a[i] = i;
}
}
free(a);
return 0;
}
|
lw_vector.h | /* lw_vector.h, part of the Global Epidemic Simulation v1.0 BETA
/* Lightweight vector class
/*
/* Copyright 2012, MRC Centre for Outbreak Analysis and Modelling
/*
/* Licensed under the Apache License, Version 2.0 (the "License");
/* you may not use this file except in compliance with the License.
/* You may obtain a copy of the License at
/*
/* http://www.apache.org/licenses/LICENSE-2.0
/*
/* Unless required by applicable law or agreed to in writing, software
/* distributed under the License is distributed on an "AS IS" BASIS,
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/* See the License for the specific language governing permissions and
/* limitations under the License.
*/
#ifndef LW_VECTOR
#define LW_VECTOR
#include "simINT64.h"
// Recent VS/Intel combinations have incorrectly left _OPENMP undefined. Enable OPENMP_ENABLED below if necessary.
#define OPENMP_ENABLED
#ifdef _OPENMP
#define OPENMP_ENABLED
//#pragma message("Vector class (lw_vector.h) - OpenMP parallelisation enabled")
#endif
/* enables thread safety while inserting or deleting elements from the vector */
/* can be switched off in a thread safe code to make it more efficient */
#ifdef OPENMP_ENABLED
//#define THREAD_SAFE_OPS
#endif
#ifndef THREAD_SAFE_OPS
//#pragma message("Warning: Vector class (lw_vector.h) - inserting or deleting elements from the vector is not thread safe!")
#endif
/* enables parallelisation of loops */
#ifdef OPENMP_ENABLED
#define LOOPS_IN_PARALLEL
#endif
#ifdef LOOPS_IN_PARALLEL
//#define SCHED_TYPE dynamic // schedule type: dynamic
#define SCHED_TYPE static // schedule type: static
#define NUM_PROCS_OUT 0 // number of processors excluded from parallelisation
#endif
/* enables index range check useful while debugging and testing */
//#define ENABLE_INDX_RANGE_CHECK
#ifndef ENABLE_INDX_RANGE_CHECK
//#pragma message("Warning: Vector class (lw_vector.h) - index range check disabled!")
#endif
/* exception codes */
#define OUT_OF_RANGE 1
#pragma pack(push,_CRT_PACKING)
template <class lwvType> class lw_vector
{
private:
#ifdef THREAD_SAFE_OPS
omp_lock_t ob__lock;
void init_lock();
void destroy_lock();
void set_lock();
void unset_lock();
bool is_lock_valid();
#endif
#ifdef OPENMP_ENABLED
int num_procs;
#endif
SIM_I64 alloc_size; // allocated vector size
SIM_I64 indx_flast; // index of a memory location that follows the last element of a vector ( indx_flast <= alloc_size )
lwvType *v; // the vector itself
/* methods that do bytewise data copying */
void copy_frwrd(char* dest, char* src, SIM_I64 count);
void copy_frwrd(char* dest, char* src, SIM_I64 count, int num_procs);
void copy_bckwrd(char* dest, char* src, SIM_I64 count);
public:
/* no proper definition of the iterator class */
typedef lwvType* iterator;
/* default constructor; allocates empty vector */
lw_vector();
/* constructor; allocates memory for a vector but the elements of this vector remain undefined */
/* num_els: number of elements */
lw_vector(SIM_I64 num_els);
/* constructor; allocates memory for a vector and initialises its elements with copies of el */
/* num_els: number of elements */
lw_vector(SIM_I64 num_els, const lwvType& el);
/* copy constructor */
lw_vector(const lw_vector& lwv);
/* destructor */
~lw_vector();
/* assignment operator */
lw_vector<lwvType> operator=(const lw_vector& lwv2);
/* provides access to vector elements via index indx; not thread safe */
lwvType& operator[](SIM_I64 indx);
/* returns a reference to the element at the position indx; not thread safe */
lwvType& at(SIM_I64 indx);
/* returns a reference to the first element of the vector */
lwvType& front();
/* returns a reference to the last element of the vector */
lwvType& back();
/* returns random-access iterator to the first element of the vector */
iterator begin();
/* returns random-access iterator that points just beyond the end of the vector */
iterator end();
/* returns the size of the vector (number of elements) */
SIM_I64 size();
/* tests if there are any elements in the vector */
bool empty();
/* adds an element el to the end of a vector */
void push_back(const lwvType& el);
/* inserts an element el into a vector at the position indx */
/* returns an index that points to the position of the inserted element */
SIM_I64 insert(SIM_I64 indx, const lwvType& el);
/* inserts count copies of el into a vector starting from the position indx_start */
void insert(SIM_I64 indx_start, SIM_I64 count, const lwvType& el);
/* inserts an element el into a vector at the position specified by iterator it */
/* returns an iterator that points to the position of the inserted element */
iterator insert(iterator it, const lwvType& el);
/* inserts count copies of el into a vector starting from the position specified by iterator it_start */
void insert(iterator it_start, SIM_I64 count, const lwvType& el);
/* deletes the element at the end of the vector without reducing its capacity */
void pop_back();
/* erases an element at the position specified by indx */
/* returns an index pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
SIM_I64 erase(SIM_I64 indx);
/* erases elements in the range starting from index indx_start and finishing just before the position defined by indx_end */
/* returns an index pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
SIM_I64 erase(SIM_I64 indx_start, SIM_I64 indx_end);
/* erases an element at the position specified by iterator it */
/* returns an iterator pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
iterator erase(iterator it);
/* erases elements in the range starting from iterator it_start and finishing just before the position defined by it_end */
/* returns an iterator pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
iterator erase(iterator it_start, iterator it_end);
/* erases all elements of the vector without reducing its capacity */
void clear();
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (default) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until until it reaches the requested size */
void resize(SIM_I64 new_size);
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (el) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until until it reaches the requested size */
void resize(SIM_I64 new_size, const lwvType& el);
/* compacts the vector reducing the allocated memory */
void compact();
};
/* vector size grows exponentially */
#define DEF_INI_SIZE 1 // default initial size
#define EXP_GROWTH_COEFF 3 //3
#ifdef THREAD_SAFE_OPS
template <class lwvType> inline void lw_vector<lwvType>::init_lock()
{
omp_init_lock(&ob__lock);
}
template <class lwvType> inline void lw_vector<lwvType>::destroy_lock()
{
omp_destroy_lock(&ob__lock);
ob__lock = 0;
}
template <class lwvType> inline void lw_vector<lwvType>::set_lock()
{
omp_set_lock(&ob__lock);
}
template <class lwvType> inline void lw_vector<lwvType>::unset_lock()
{
omp_unset_lock(&ob__lock);
}
template <class lwvType> inline bool lw_vector<lwvType>::is_lock_valid()
{
if( ob__lock != 0 )
return true;
else
return false;
}
#endif
template <class lwvType> inline void lw_vector<lwvType>::copy_frwrd(char* dest, char* src, SIM_I64 count)
{
for(SIM_I64 i=0; i < count; i++)
*(dest++) = *(src++);
}
template <class lwvType> inline void lw_vector<lwvType>::copy_frwrd(char* dest, char* src, SIM_I64 count, int num_procs)
{
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = count > num_procs ? count / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < count; i++)
dest[i] = src[i];
}
template <class lwvType> inline void lw_vector<lwvType>::copy_bckwrd(char* dest, char* src, SIM_I64 count)
{
for(SIM_I64 i = count; i > 0 ; i--)
*(--dest) = *(--src);
}
/* default constructor; allocates empty vector */
template <class lwvType> lw_vector<lwvType>::lw_vector() : alloc_size(0), indx_flast(0), v(0)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = omp_get_num_procs() - NUM_PROCS_OUT; // TESTING!
#endif
}
/* constructor; allocates memory for a vector but the elements of this vector remain undefined */
/* num_els: number of elements */
template <class lwvType> lw_vector<lwvType>::lw_vector(SIM_I64 num_els) : alloc_size(num_els), indx_flast(0)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = omp_get_num_procs();
#endif
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
}
/* constructor; allocates memory for a vector and initialises its elements with copies of el */
/* num_els: number of elements */
template <class lwvType> lw_vector<lwvType>::lw_vector(SIM_I64 num_els, const lwvType& el) : alloc_size(num_els), indx_flast(alloc_size)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = omp_get_num_procs();
#endif
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < indx_flast; i++)
::new(&v[i]) lwvType(el);
}
/* copy constructor */
template <class lwvType> lw_vector<lwvType>::lw_vector(const lw_vector& lwv)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = lwv.num_procs;
#endif
alloc_size = lwv.alloc_size;
indx_flast = lwv.indx_flast;
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < indx_flast; i++)
::new(&v[i]) lwvType(lwv.v[i]);
}
/* destructor */
template <class lwvType> lw_vector<lwvType>::~lw_vector()
{
if( v != 0 )
{
for(SIM_I64 i=0; i < indx_flast; i++)
v[i].~lwvType();
delete [] (char*)v;
alloc_size = 0;
indx_flast = 0;
}
v = 0;
#ifdef THREAD_SAFE_OPS
destroy_lock();
#endif
}
/* assignment operator */
template <class lwvType> lw_vector<lwvType> lw_vector<lwvType>::operator=(const lw_vector& lwv2)
{
#ifdef THREAD_SAFE_OPS
if( !is_lock_valid() )
init_lock();
set_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = lwv2.num_procs;
#endif
if( v != 0 )
{
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < indx_flast; i++)
v[i].~lwvType();
delete [] (char*)v;
}
alloc_size = lwv2.alloc_size;
indx_flast = lwv2.indx_flast;
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < indx_flast; i++)
v[i] = lwv2.v[i];
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return *this;
}
/* provides access to vector elements via index indx; not thread safe */
template <class lwvType> inline lwvType& lw_vector<lwvType>::operator[](SIM_I64 indx)
{
#ifdef ENABLE_INDX_RANGE_CHECK
if( indx < 0 || indx >= indx_flast )
throw OUT_OF_RANGE;
#endif
return v[indx];
}
/* returns a reference to the element at the position indx; not thread safe */
template <class lwvType> inline lwvType& lw_vector<lwvType>::at(SIM_I64 indx)
{
#ifdef ENABLE_INDX_RANGE_CHECK
if( indx < 0 || indx >= indx_flast )
throw OUT_OF_RANGE;
#endif
return v[indx];
}
/* returns a reference to the first element of the vector */
template <class lwvType> inline lwvType& lw_vector<lwvType>::front()
{
return v[0];
}
/* returns a reference to the last element of the vector */
template <class lwvType> inline lwvType& lw_vector<lwvType>::back()
{
return v[indx_flast - 1];
}
/* returns random-access iterator to the first element of the vector */
template <class lwvType> inline typename lw_vector<lwvType>::iterator lw_vector<lwvType>::begin()
{
return &v[0];
}
/* returns random-access iterator that points just beyond the end of the vector */
template <class lwvType> inline typename lw_vector<lwvType>::iterator lw_vector<lwvType>::end()
{
return &v[indx_flast];
}
/* returns the size of the vector (number of elements) */
template <class lwvType> inline SIM_I64 lw_vector<lwvType>::size()
{
return indx_flast;
}
/* tests if there are any elements in the vector */
template <class lwvType> inline bool lw_vector<lwvType>::empty()
{
if( indx_flast == 0 )
return true;
else
return false;
}
/* adds an element el to the end of a vector */
template <class lwvType> void lw_vector<lwvType>::push_back(const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_flast >= alloc_size ) // the vector is full, reallocation needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
if( v != 0 )
{
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
delete [] (char*)v;
}
v = v_temp;
alloc_size = new_alloc_size;
}
::new(&v[indx_flast++]) lwvType(el);
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* inserts an element el into a vector at the position indx */
template <class lwvType> SIM_I64 lw_vector<lwvType>::insert(SIM_I64 indx, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx < 0 || indx > indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( indx_flast < alloc_size ) // there is still memory available in the vector
{
copy_bckwrd((char*)(&v[indx_flast + 1]), (char*)(&v[indx_flast]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
::new(&v[indx]) lwvType(el);
}
else // the vector is full, reallocation needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
#endif
::new(&v_temp[indx]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast++;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return indx;
}
/* inserts count copies of el into a vector starting from the position indx_start */
template <class lwvType> void lw_vector<lwvType>::insert(SIM_I64 indx_start, SIM_I64 count, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_start < 0 || indx_start > indx_flast || count < 0 )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( (indx_flast + count) <= alloc_size ) // allocated size of the vector remains unchanged
{
copy_bckwrd((char*)(&v[indx_flast + count]), (char*)(&v[indx_flast + count - 1]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = count > num_procs ? count / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i = indx_start; i < indx_start + count; i++)
::new(&v[i]) lwvType(el);
}
else // allocated vector size is insufficient; reallocation and expansion needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
#endif
for(SIM_I64 i = indx_start; i < indx_start + count; i++)
::new(&v_temp[i]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast += count;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* inserts an element el into a vector at the position specified by iterator it */
template <class lwvType> typename lw_vector<lwvType>::iterator lw_vector<lwvType>::insert(iterator it, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
SIM_I64 delta = (SIM_I64)(it + 1) - (SIM_I64)it;
SIM_I64 indx = ((SIM_I64)it - (SIM_I64)begin()) / delta;
if( indx < 0 || indx > indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( indx_flast < alloc_size ) // there is still memory available in the vector
{
copy_bckwrd((char*)(&v[indx_flast + 1]), (char*)(&v[indx_flast]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
::new(&v[indx]) lwvType(el);
}
else // the vector is full, reallocation needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
#endif
::new(&v_temp[indx]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast++;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return it;
}
/* inserts count copies of el into a vector starting from the position specified by iterator it_start */
template <class lwvType> void lw_vector<lwvType>::insert(iterator it_start, SIM_I64 count, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
SIM_I64 delta = (SIM_I64)(it_start + 1) - (SIM_I64)it_start;
SIM_I64 indx_start = ((SIM_I64)it_start - (SIM_I64)begin()) / delta;
if( indx_start < 0 || indx_start > indx_flast || count < 0 )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( (indx_flast + count) <= alloc_size ) // allocated size of the vector remains unchanged
{
copy_bckwrd((char*)(&v[indx_flast + count]), (char*)(&v[indx_flast + count - 1]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = count > num_procs ? count / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i = indx_start; i < indx_start + count; i++)
::new(&v[i]) lwvType(el);
}
else // allocated vector size is insufficient; reallocation and expansion needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
#endif
for(SIM_I64 i = indx_start; i < indx_start + count; i++)
::new(&v_temp[i]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast += count;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* deletes the element at the end of the vector without reducing its capacity */
template <class lwvType> void lw_vector<lwvType>::pop_back()
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_flast == 0 )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return;
}
v[--indx_flast].~lwvType(); // explicitly call the destructor for the erased object (virtual call)
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* erases an element at the position specified by indx */
/* returns an index pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
template <class lwvType> SIM_I64 lw_vector<lwvType>::erase(SIM_I64 indx)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx < 0 || indx >= indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
v[indx].~lwvType(); // explicitly call the destructor for the erased object (virtual call)
if( EXP_GROWTH_COEFF * (indx_flast - 1) > alloc_size ) // allocated size of the vector remains unchanged
copy_frwrd((char*)(&v[indx]), (char*)(&v[indx + 1]), (indx_flast -indx - 1) * (SIM_I64)sizeof(lwvType));
else // reallocate the vector and decrease its size
{
SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast--;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return indx;
}
/* erases elements in the range starting from index indx_start and finishing just before the position defined by indx_end */
/* returns an index pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
template <class lwvType> SIM_I64 lw_vector<lwvType>::erase(SIM_I64 indx_start, SIM_I64 indx_end)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_start > indx_end || indx_start < 0 || indx_start >= indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( indx_end > indx_flast )
indx_end = indx_flast;
for(SIM_I64 i = indx_start; i < indx_end; i++)
v[i].~lwvType(); // explicitly call the destructor for the erased objects (virtual call)
if( EXP_GROWTH_COEFF * (indx_flast - (indx_end - indx_start)) > alloc_size ) // allocated size of the vector remains unchanged
copy_frwrd((char*)(&v[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
else // reallocate the vector and decrease its size
{
SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast -= indx_end - indx_start;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return indx_start;
}
/* erases an element at the position specified by iterator it */
/* returns an iterator pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
template <class lwvType> typename lw_vector<lwvType>::iterator lw_vector<lwvType>::erase(iterator it)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
SIM_I64 delta = (SIM_I64)(it + 1) - (SIM_I64)it;
SIM_I64 indx = ((SIM_I64)it - (SIM_I64)begin()) / delta;
if( indx < 0 || indx >= indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
v[indx].~lwvType(); // explicitly call the destructor for the erased object (virtual call)
if( EXP_GROWTH_COEFF * (indx_flast - 1) > alloc_size ) // allocated size of the vector remains unchanged
copy_frwrd((char*)(&v[indx]), (char*)(&v[indx + 1]), (indx_flast -indx - 1) * (SIM_I64)sizeof(lwvType));
else // reallocate the vector and decrease its size
{
SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast--;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return it;
}
/* erases elements in the range starting from iterator it_start and finishing just before the position defined by it_end */
/* returns an iterator pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
template <class lwvType> typename lw_vector<lwvType>::iterator lw_vector<lwvType>::erase(iterator it_start, iterator it_end)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
SIM_I64 delta = (SIM_I64)(it_start + 1) - (SIM_I64)it_start;
SIM_I64 indx_start = ((SIM_I64)it_start - (SIM_I64)begin()) / delta;
SIM_I64 indx_end = ((SIM_I64)it_end - (SIM_I64)begin()) / delta;
if( indx_start > indx_end || indx_start < 0 || indx_start >= indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( indx_end > indx_flast )
indx_end = indx_flast;
for(SIM_I64 i = indx_start; i < indx_end; i++)
v[i].~lwvType(); // explicitly call the destructor for the erased objects (virtual call)
if( EXP_GROWTH_COEFF * (indx_flast - (indx_end - indx_start)) > alloc_size ) // allocated size of the vector remains unchanged
copy_frwrd((char*)(&v[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
else // reallocate the vector and decrease its size
{
SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast -= indx_end - indx_start;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return it_start;
}
/* erases all elements of the vector without reducing its capacity */
template <class lwvType> void lw_vector<lwvType>::clear()
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? (indx_flast * (SIM_I64)sizeof(lwvType)) / num_procs : (SIM_I64)sizeof(lwvType);
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i = 0; i < indx_flast; i++)
v[i].~lwvType(); // explicitly call the destructor for the erased objects (virtual call)
indx_flast = 0;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (default) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until it reaches the requested size */
template <class lwvType> void lw_vector<lwvType>::resize(SIM_I64 new_size)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
SIM_I64 new_alloc_size = new_size;
if( new_alloc_size != alloc_size )
{
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
if( indx_flast > new_alloc_size )
indx_flast = new_alloc_size;
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = (new_alloc_size - indx_flast) > num_procs ? (new_alloc_size - indx_flast) / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
::new(&v_temp[i]) lwvType();
if( v != 0 )
delete [] (char*)v;
v = v_temp;
}
else if( new_alloc_size == alloc_size )
{
for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
::new(&v[i]) lwvType();
}
indx_flast = new_alloc_size;
alloc_size = new_alloc_size;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (el) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until it reaches the requested size */
template <class lwvType> void lw_vector<lwvType>::resize(SIM_I64 new_size, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
SIM_I64 new_alloc_size = new_size;
if( new_alloc_size != alloc_size )
{
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
if( indx_flast > new_alloc_size )
indx_flast = new_alloc_size;
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = (new_alloc_size - indx_flast) > num_procs ? (new_alloc_size - indx_flast) / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
::new(&v_temp[i]) lwvType(el);
if( v != 0 )
delete [] (char*)v;
v = v_temp;
}
else if( new_alloc_size == alloc_size )
{
for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
::new(&v[i]) lwvType(el);
}
indx_flast = new_alloc_size;
alloc_size = new_alloc_size;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* compacts the vector reducing the allocated memory */
template <class lwvType> void lw_vector<lwvType>::compact()
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_flast < alloc_size )
{
SIM_I64 new_alloc_size = indx_flast;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
alloc_size = new_alloc_size;
v = v_temp;
}
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
#pragma pack(pop)
#endif
|
MonteCarlo.c | // Description: This program will estimate the value of
// PI using the monte carlo method. It utilizes parallel
// threads using OpenMP to calculate a user specified
// amount of samples. The program also tracks how long
// this process takes then prints the data out.
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#define PI 3.1459265358979
//prototypes
void ProgramIntroduction();
void GetInput(const char**, int, long long*, int*);
void MonteCarlo(long long, long long*);
void EstimatePI(const long long, const long long, double*, double*);
void DisplayResults(const long long, const int, const double, const double, const double);
int main(int argc, const char* argv[])
{
system("clear");
ProgramIntroduction();
int thread_count = 0;
long long samples, hits = 0;
double pi_estimated, pi_difference = 0.0;
double start_time, end_time, total_time = 0.0;
start_time = omp_get_wtime();
GetInput(argv, argc, &samples, &thread_count);
#pragma omp parallel num_threads(thread_count)
{
MonteCarlo(samples, &hits);
}
end_time = omp_get_wtime();
total_time = end_time - start_time;
EstimatePI(samples, hits, &pi_estimated, &pi_difference);
DisplayResults(samples, thread_count, total_time, pi_estimated, pi_difference);
return 0;
}
//***********************************************************************
//Function : ProgramIntroduction ***
//Description : Displays a message to the user explaining the ***
// : functionality of the program. ***
//Input Args : void/none ***
//Output args : void/none ***
//Return : void/none ***
//***********************************************************************
void ProgramIntroduction()
{
printf("This program creates and uses a number of threads specified by the user.\n");
printf("It uses the Monte Carlo method to estimate the value of PI. The estimation\n");
printf("becomes more accurate as the sample size is increased. Similarly, if more\n");
printf("threads are provided the program will complete the work faster.\n");
printf("If two input numbers are not provided on the command line, please \n");
printf("enter them when prompted below. (ex. 20000 4)\n\n");
printf("Press any key to continue...");
getchar();
printf("\n");
}
//***********************************************************************
//Function : GetInput ***
//Description : Gets the two input values used for sample number and ***
// : thread count. ***
//Input Args : cl_array - cl args, size - amount of cl args, ***
// : samples - num samples, thread_count - num threads ***
//Output args : samples, thread_count ***
//Return : void/none ***
//***********************************************************************
void GetInput(const char** cl_array, int size, long long *samples, int *thread_count)
{
if(size == 3)
{
*samples = strtol(cl_array[1], NULL, 10);
*thread_count = strtol(cl_array[2], NULL, 10);
}
else
{
printf("Enter Total Samples: ");
scanf("%lli", samples);
printf("Enter Total Threads: ");
scanf("%d", thread_count);
printf("\n");
}
printf("You input %lli and %d.\n\n", *samples, *thread_count);
}
//***********************************************************************
//Function : MonteCarlo ***
//Description : Calculates number of hits by using the monte carlo ***
// : method. Found hits are then used to estimate PI. ***
//Input Args : samples - num samples, hits - total num hits ***
//Output args : hits ***
//Return : void/none ***
//***********************************************************************
void MonteCarlo(long long samples, long long *hits)
{
double x, y = 0.0;
long long int total_hits = 0;
struct timeval t_val;
int thread_num = omp_get_thread_num();
int threads = omp_get_num_threads();
long long size = samples/threads;
gettimeofday(&t_val, NULL);
unsigned long time_x = (unsigned long)t_val.tv_sec + thread_num; //offset to ensure seeds are different
unsigned long time_y = (unsigned long)t_val.tv_usec;
for(int i = 0; i < size; i++)
{
x = rand_r(&time_x);
x = x/RAND_MAX;
y = rand_r(&time_y);
y = y/RAND_MAX;
x = x * x;
y = y * y;
if((x + y) <= 1)
{
total_hits++;
}
}
#pragma omp critical
{
*hits += total_hits;
}
#pragma omp critical
{
printf("Thread %d of %d hits %lli samples out of %lli.\n", thread_num + 1, threads, total_hits, size);
}
}
//***********************************************************************
//Function : EstimatePI ***
//Description : Estimates PI using the number of hits from monte ***
// : carlo method and number of samples. ***
//Input Args : samples - num samples, hits - total num hits ***
// : pi_estimated - estimated pi, pi_difference - dif ***
// : between actual pi and estimated pi ***
//Output args : pi_estimated, pi_difference ***
//Return : void/none ***
//***********************************************************************
void EstimatePI(const long long samples, const long long hits, double *pi_estimated, double *pi_difference)
{
*pi_estimated = (double)(4 * hits) / samples;
*pi_difference = (*pi_estimated - PI) * -1;
}
//***********************************************************************
//Function : DisplayResults ***
//Description : Displays results obtained from the monte carlo method ***
// : and other related data. ***
//Input Args : samples - num samples, thread_count - num threads ***
// : total_time - prog run time, pi_estimated - estimated ***
// : pi, pi_difference - dif between actual pi and ***
// : estimated pi ***
//Output args : void/none ***
//Return : void/none ***
//***********************************************************************
void DisplayResults(const long long samples, const int thread_count, const double total_time, const double pi_estimated, const double pi_difference)
{
printf("\n----------------------------------\n");
printf("Sample Size: %lli\n", samples);
printf("Number of Threads: %d\n", thread_count);
printf("Estimated PI: %0.12f\n", pi_estimated);
printf("Actual PI: %0.12f\n", PI);
printf("Difference: %0.12f\n", pi_difference);
printf("Run Time: %0.12f\n", total_time);
printf("----------------------------------\n\n");
} |
ten_tusscher_2004_RS_CPU_epi.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
assert(cell_model);
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
// sv[0] = INITIAL_V; // V; millivolt
// sv[1] = 0.f; //M
// sv[2] = 0.75; //H
// sv[3] = 0.75f; //J
// sv[4] = 0.f; //Xr1
// sv[5] = 1.f; //Xr2
// sv[6] = 0.f; //Xs
// sv[7] = 1.f; //S
// sv[8] = 0.f; //R
// sv[9] = 0.f; //D
// sv[10] = 1.f; //F
// sv[11] = 1.f; //FCa
// sv[12] = 1.f; //G
// sv[13] = 0.0002; //Cai
// sv[14] = 0.2f; //CaSR
// sv[15] = 11.6f; //Nai
// sv[16] = 138.3f; //Ki
///real sv11[]={-86.4132492539631,0.00133347038675965,0.775880896537911,0.775771252769902,0.000178713357197260,0.483477220262479,0.00297293159223580,0.999998293141309,1.98410777026336e-08,1.93055451927418e-05,0.999768022017469,1.00664361385672,0.999983813751547,5.56594018103532e-05,0.360866271906676,10.8624828330324,138.855946632933};
/// initial condition
real sv11[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};
sv[0] = sv11[0]; // V; millivolt
sv[1] = sv11[1]; //M
sv[2] = sv11[2]; //H
sv[3] = sv11[3]; //J
sv[4] = sv11[4]; //Xr1
sv[5] = sv11[5]; //Xr2
sv[6] = sv11[6]; //Xs
sv[7] = sv11[7]; //S
sv[8] = sv11[8]; //R
sv[9] = sv11[9]; //D
sv[10] = sv11[10]; //F
sv[11] = sv11[11]; //FCa
sv[12] = sv11[12]; //G
sv[13] = sv11[13]; //Cai
sv[14] = sv11[14]; //CaSR
sv[15] = sv11[15]; //Nai
sv[16] = sv11[16]; //Ki
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
assert(sv);
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
#ifdef EPI
real Gks=0.245;
#endif
#ifdef ENDO
real Gks=0.245;
#endif
#ifdef MCELL
real Gks=0.062;
#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
#ifdef EPI
real Gto=0.294;
#endif
#ifdef ENDO
real Gto=0.073;
#endif
#ifdef MCELL
real Gto=0.294;
#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
omp_smithW_orig.c | /*********************************************************************************
* SmithβWaterman algorithm
* Purpose: Local alignment of nucleotide or protein sequences
* Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro
* Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG
* Execution: ./omp_smithW <number_of_threads> <number_of_col> <number_of_rows>
*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/*--------------------------------------------------------------------
* Constants
*/
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */
/*--------------------------------------------------------------------
* Helpers
*/
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) ((a) > (b) ? a : b)
// #define DEBUG
/* End of Helpers */
/*--------------------------------------------------------------------
* Functions Prototypes
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int *i, long long int *si, long long int *sj);
/* End of prototypes */
/*--------------------------------------------------------------------
* Global Variables
*/
//Defines size of strings to be compared
long long int m ; //Columns - Size of string a
long long int n ; //Lines - Size of string b
//Defines scores
int matchScore = 5;
int missmatchScore = -3;
int gapScore = -4;
//Strings over the Alphabet Sigma
char *a, *b;
/* End of global variables */
/*--------------------------------------------------------------------
* Function: main
*/
int main(int argc, char* argv[]) {
int thread_count = strtol(argv[1], NULL, 10);
m = strtoll(argv[2], NULL, 10);
n = strtoll(argv[3], NULL, 10);
#ifdef DEBUG
printf("\nMatrix[%lld][%lld]\n", n, m);
#endif
//Allocates a and b
a = malloc(m * sizeof(char));
b = malloc(n * sizeof(char));
//Because now we have zeros
m++;
n++;
//Allocates similarity matrix H
int *H;
H = calloc(m * n, sizeof(int));
//Allocates predecessor matrix P
int *P;
P = calloc(m * n, sizeof(int));
//Gen rand arrays a and b
generate();
//Uncomment this to test the sequence available at
//http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
// OBS: m=11 n=7
// a[0] = 'C';
// a[1] = 'G';
// a[2] = 'T';
// a[3] = 'G';
// a[4] = 'A';
// a[5] = 'A';
// a[6] = 'T';
// a[7] = 'T';
// a[8] = 'C';
// a[9] = 'A';
// a[10] = 'T';
// b[0] = 'G';
// b[1] = 'A';
// b[2] = 'C';
// b[3] = 'T';
// b[4] = 'T';
// b[5] = 'A';
// b[6] = 'C';
//Start position for backtrack
long long int maxPos = 0;
//Calculates the similarity matrix
long long int i, j;
//Gets Initial time
double initialTime = omp_get_wtime();
long long int si, sj, ai, aj;
//Because now we have zeros ((m-1) + (n-1) - 1)
long long int nDiag = m + n - 3;
long long int nEle;
#pragma omp parallel num_threads(thread_count) \
default(none) shared(H, P, maxPos, nDiag) private(nEle, i, si, sj, ai, aj)
{
for (i = 1; i <= nDiag; ++i)
{
nEle = nElement(i);
calcFirstDiagElement(&i, &si, &sj);
#pragma omp for
for (j = 1; j <= nEle; ++j)
{
ai = si - j + 1;
aj = sj + j - 1;
similarityScore(ai, aj, H, P, &maxPos);
}
}
}
backtrack(P, maxPos);
//Gets final time
double finalTime = omp_get_wtime();
printf("\nElapsed time: %f\n\n", finalTime - initialTime);
#ifdef DEBUG
printf("\nSimilarity Matrix:\n");
printMatrix(H);
printf("\nPredecessor Matrix:\n");
printPredecessorMatrix(P);
#endif
//Frees similarity matrixes
free(H);
free(P);
//Frees input arrays
free(a);
free(b);
return 0;
} /* End of main */
/*--------------------------------------------------------------------
* Function: nElement
* Purpose: Calculate the number of i-diagonal elements
*/
long long int nElement(long long int i) {
if (i < m && i < n) {
//Number of elements in the diagonal is increasing
return i;
}
else if (i < max(m, n)) {
//Number of elements in the diagonal is stable
long int min = min(m, n);
return min - 1;
}
else {
//Number of elements in the diagonal is decreasing
long int min = min(m, n);
return 2 * min - i + abs(m - n) - 2;
}
}
/*--------------------------------------------------------------------
* Function: calcElement
* Purpose: Calculate the position of (si, sj)-element
*/
void calcFirstDiagElement(long long int *i, long long int *si, long long int *sj) {
// Calculate the first element of diagonal
if (*i < n) {
*si = *i;
*sj = 1;
} else {
*si = n - 1;
*sj = *i - n + 2;
}
}
/*--------------------------------------------------------------------
* Function: SimilarityScore
* Purpose: Calculate the maximum Similarity-Score H(i,j)
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
int up, left, diag;
//Stores index of element
long long int index = m * i + j;
//Get element above
up = H[index - m] + gapScore;
//Get element on the left
left = H[index - 1] + gapScore;
//Get element on the diagonal
diag = H[index - m - 1] + matchMissmatchScore(i, j);
//Calculates the maximum
int max = NONE;
int pred = NONE;
/* === Matrix ===
* a[0] ... a[n]
* b[0]
* ...
* b[n]
*
* generate 'a' from 'b', if 'β' insert e 'β' remove
* a=GAATTCA
* b=GACTT-A
*
* generate 'b' from 'a', if 'β' insert e 'β' remove
* b=GACTT-A
* a=GAATTCA
*/
if (diag > max) { //same letter β
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter β
max = up;
pred = UP;
}
if (left > max) { //insert letter β
max = left;
pred = LEFT;
}
//Inserts the value in the similarity and predecessor matrixes
H[index] = max;
P[index] = pred;
//Updates maximum score to be used as seed on backtrack
if (max > H[*maxPos]) {
#pragma omp critical
*maxPos = index;
}
} /* End of similarityScore */
/*--------------------------------------------------------------------
* Function: matchMissmatchScore
* Purpose: Similarity function on the alphabet for match/missmatch
*/
int matchMissmatchScore(long long int i, long long int j) {
if (a[j - 1] == b[i - 1])
return matchScore;
else
return missmatchScore;
} /* End of matchMissmatchScore */
/*--------------------------------------------------------------------
* Function: backtrack
* Purpose: Modify matrix to print, path change from value to PATH
*/
void backtrack(int* P, long long int maxPos) {
//hold maxPos value
long long int predPos;
//backtrack from maxPos to startPos = 0
do {
if (P[maxPos] == DIAGONAL)
predPos = maxPos - m - 1;
else if (P[maxPos] == UP)
predPos = maxPos - m;
else if (P[maxPos] == LEFT)
predPos = maxPos - 1;
P[maxPos] *= PATH;
maxPos = predPos;
} while (P[maxPos] != NONE);
} /* End of backtrack */
/*--------------------------------------------------------------------
* Function: printMatrix
* Purpose: Print Matrix
*/
void printMatrix(int* matrix) {
long long int i, j;
printf("-\t-\t");
for (j = 0; j < m-1; j++) {
printf("%c\t", a[j]);
}
printf("\n-\t");
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
if (j==0 && i>0) printf("%c\t", b[i-1]);
printf("%d\t", matrix[m * i + j]);
}
printf("\n");
}
} /* End of printMatrix */
/*--------------------------------------------------------------------
* Function: printPredecessorMatrix
* Purpose: Print predecessor matrix
*/
void printPredecessorMatrix(int* matrix) {
long long int i, j, index;
printf(" ");
for (j = 0; j < m-1; j++) {
printf("%c ", a[j]);
}
printf("\n ");
for (i = 0; i < n; i++) { //Lines
for (j = 0; j < m; j++) {
if (j==0 && i>0) printf("%c ", b[i-1]);
index = m * i + j;
if (matrix[index] < 0) {
printf(BOLDRED);
if (matrix[index] == -UP)
printf("β ");
else if (matrix[index] == -LEFT)
printf("β ");
else if (matrix[index] == -DIAGONAL)
printf("β ");
else
printf("- ");
printf(RESET);
} else {
if (matrix[index] == UP)
printf("β ");
else if (matrix[index] == LEFT)
printf("β ");
else if (matrix[index] == DIAGONAL)
printf("β ");
else
printf("- ");
}
}
printf("\n");
}
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
* Function: generate
* Purpose: Generate arrays a and b
*/
void generate() {
//Random seed
srand(time(NULL));
//Generates the values of a
long long int i;
for (i = 0; i < m; i++) {
int aux = rand() % 4;
if (aux == 0)
a[i] = 'A';
else if (aux == 2)
a[i] = 'C';
else if (aux == 3)
a[i] = 'G';
else
a[i] = 'T';
}
//Generates the values of b
for (i = 0; i < n; i++) {
int aux = rand() % 4;
if (aux == 0)
b[i] = 'A';
else if (aux == 2)
b[i] = 'C';
else if (aux == 3)
b[i] = 'G';
else
b[i] = 'T';
}
} /* End of generate */
/*--------------------------------------------------------------------
* External References:
* http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
* http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
* http://baba.sourceforge.net/
*/ |
GB_unop__abs_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_uint64_uint64)
// op(A') function: GB (_unop_tran__abs_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__abs_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
poaBarAligner.c | /**
* This is designed as a drop-in replacement for the bar aligner, using the abpoa multiple sequence aligner.
*
* Released under the MIT license, see LICENSE.txt
*/
#include "abpoa.h"
#include "poaBarAligner.h"
#include "flowerAligner.h"
#include <stdio.h>
#include <ctype.h>
// OpenMP
//#if defined(_OPENMP)
//#include <omp.h>
//#endif
abpoa_para_t *abpoaParamaters_constructFromCactusParams(CactusParams *params) {
abpoa_para_t *abpt = abpoa_init_para();
// output options
abpt->out_msa = 1; // generate Row-Column multiple sequence alignment(RC-MSA), set 0 to disable
abpt->out_cons = 0; // generate consensus sequence, set 0 to disable
// alignment mode. 0:global alignment, 1:local, 2:extension
// only global works
abpt->align_mode = ABPOA_GLOBAL_MODE;
// banding parameters
abpt->wb = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentBandConstant");
abpt->wf = cactusParams_get_float(params, 3, "bar", "poa", "partialOrderAlignmentBandFraction");
// gap scoring model
abpt->gap_open1 = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentGapOpenPenalty1");
abpt->gap_ext1 = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentGapExtensionPenalty1");
abpt->gap_open2 = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentGapOpenPenalty2");
abpt->gap_ext2 = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentGapExtensionPenalty2");
// seeding paramters
abpt->disable_seeding = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentDisableSeeding");
assert(abpt->disable_seeding == 0 || abpt->disable_seeding == 1);
abpt->k = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentMinimizerK");
abpt->w = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentMinimizerW");
abpt->min_w = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentMinimizerMinW");
// progressive toggle
abpt->progressive_poa = cactusParams_get_int(params, 3, "bar", "poa", "partialOrderAlignmentProgressiveMode");
// generate the substitution matrix
abpoa_post_set_para(abpt);
// optionally override the substitution matrix
char *submat_string = cactusParams_get_string(params, 3, "bar", "poa", "partialOrderAlignmentSubMatrix");
if (submat_string && strlen(submat_string) > 0) {
// Note, this will be used to explicitly override abpoa's subsitution matrix just before aligning
abpt->use_score_matrix = 1;
assert(abpt->m == 5);
int count = 0;
for (char* val = strtok(submat_string, " "); val != NULL; val = strtok(NULL, " ")) {
abpt->mat[count++] = atoi(val);
}
assert(count == 25);
int i; abpt->min_mis = 0, abpt->max_mat = 0;
for (i = 0; i < abpt->m * abpt->m; ++i) {
if (abpt->mat[i] > abpt->max_mat)
abpt->max_mat = abpt->mat[i];
if (-abpt->mat[i] > abpt->min_mis)
abpt->min_mis = -abpt->mat[i];
}
}
free(submat_string);
return abpt;
}
// It turns out abpoa can write to these, so we make a quick copy before using
static abpoa_para_t *copy_abpoa_params(abpoa_para_t *abpt) {
abpoa_para_t *abpt_cpy = abpoa_init_para();
abpt_cpy->out_msa = 1;
abpt_cpy->out_cons = 0;
abpt_cpy->align_mode = abpt->align_mode;
abpt_cpy->wb = abpt->wb;
abpt_cpy->wf = abpt->wf;
abpt_cpy->use_score_matrix = abpt->use_score_matrix;
abpt_cpy->match = abpt->match;
abpt_cpy->mismatch = abpt->mismatch;
abpt_cpy->gap_mode = abpt->gap_mode;
abpt_cpy->gap_open1 = abpt->gap_open1;
abpt_cpy->gap_ext1 = abpt->gap_ext1;
abpt_cpy->gap_open2 = abpt->gap_open2;
abpt_cpy->gap_ext2 = abpt->gap_ext2;
abpt_cpy->disable_seeding = abpt->disable_seeding;
abpt_cpy->k = abpt->k;
abpt_cpy->w = abpt->w;
abpt_cpy->min_w = abpt->min_w;
abpt_cpy->progressive_poa = abpt->progressive_poa;
if (abpt->use_score_matrix == 1) {
memcpy(abpt_cpy->mat, abpt->mat, abpt->m * abpt->m * sizeof(int));
}
abpt_cpy->max_mat = abpt->max_mat;
abpt_cpy->min_mis = abpt->min_mis;
return abpt_cpy;
}
// char <--> uint8_t conversion copied over from abPOA example
// AaCcGgTtNn ==> 0,1,2,3,4
static unsigned char nst_nt4_table[256] = {
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5 /*'-'*/, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 0, 4, 1, 4, 4, 4, 2, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
};
char msa_to_base(uint8_t n) {
return "ACGTN-"[n];
}
uint8_t msa_to_byte(char c) {
return nst_nt4_table[(int)c];
}
static uint8_t rc_table[6] = { 3, 2, 1, 0, 4, 5 };
static inline uint8_t msa_to_rc(uint8_t n) {
return rc_table[n];
}
void msa_destruct(Msa *msa) {
for(int64_t i=0; i<msa->seq_no; i++) {
if (msa->seqs != NULL) {
free(msa->seqs[i]);
}
free(msa->msa_seq[i]);
}
free(msa->seqs);
free(msa->msa_seq);
free(msa->seq_lens);
free(msa);
}
void msa_print(Msa *msa, FILE *f) {
fprintf(f, "MSA. Seq no: %i column no: %i \n", (int)msa->seq_no, (int)msa->column_no);
for(int64_t i=0; i<msa->seq_no; i++) {
fprintf(f, "Row:%i [len=%i]\t", (int)i, (int)msa->seq_lens[i]);
for(int64_t j=0; j<msa->column_no; j++) {
fprintf(f, "%c", msa_to_base(msa->msa_seq[i][j]));
}
fprintf(f, "\n");
}
fprintf(f, "\n");
}
/**
* flip msa to its reverse complement (for trimming purposees)
*/
static void flip_msa_seq(Msa* msa) {
if (msa != NULL) {
int64_t middle = msa->column_no / 2;
bool odd = msa->column_no % 2 == 1;
for (int64_t i = 0; i < msa->seq_no; ++i) {
for (int64_t j = 0; j < middle; ++j) {
uint8_t buf = msa->msa_seq[i][j];
msa->msa_seq[i][j] = msa_to_rc(msa->msa_seq[i][msa->column_no - 1 - j]);
msa->msa_seq[i][msa->column_no - 1 - j] = msa_to_rc(buf);
}
if (odd) {
msa->msa_seq[i][middle] = msa_to_rc(msa->msa_seq[i][middle]);
}
}
}
}
/**
* Returns an array of floats, one for each corresponding column in the MSA. Each float
* is the score of the column in the alignment.
*/
static float *make_column_scores(Msa *msa) {
float *column_scores = st_calloc(msa->column_no, sizeof(float));
for(int64_t i=0; i<msa->column_no; i++) {
// Score is simply max(number of aligned bases in the column - 1, 0)
for(int64_t j=0; j<msa->seq_no; j++) {
if(msa_to_base(msa->msa_seq[j][i]) != '-') {
column_scores[i]++;
}
}
if(column_scores[i] >= 1.0) {
column_scores[i]--;
}
assert(column_scores[i] >= 0.0);
}
return column_scores;
}
/**
* Fills in cu_column_scores with the cumulative sum of column scores, from left-to-right, of columns
* containing a non-gap character in the given "row".
*/
static void sum_column_scores(int64_t row, Msa *msa, float *column_scores, float *cu_column_scores) {
float cu_score = 0.0; // The cumulative sum of column scores containing bases for the given row
int64_t j=0; // The index in the DNA string for the given row
for(int64_t i=0; i<msa->column_no; i++) {
if(msa_to_base(msa->msa_seq[row][i]) != '-') {
cu_score += column_scores[i];
cu_column_scores[j++] = cu_score;
}
}
assert(msa->seq_lens[row] == j); // We should cover all the bases in the DNA sequence
}
/**
* Removes the suffix of the given row from the MSA and updates the column scores. suffix_start is the beginning
* suffix to remove.
*/
static void trim_msa_suffix(Msa *msa, float *column_scores, int64_t row, int64_t suffix_start) {
int64_t seq_index = 0;
for(int64_t i=0; i<msa->column_no; i++) {
if(msa_to_base(msa->msa_seq[row][i]) != '-') {
if(seq_index++ >= suffix_start) {
msa->msa_seq[row][i] = msa_to_byte('-');
column_scores[i] = column_scores[i]-1 > 0 ? column_scores[i]-1 : 0;
assert(column_scores[i] >= 0.0);
}
}
}
}
/**
* Used to make two MSAs consistent with each other for a shared sequence
*/
static void trim(int64_t row1, Msa *msa1, float *column_scores1,
int64_t row2, Msa *msa2, float *column_scores2, int64_t overlap) {
if(overlap == 0) { // There is no overlap, so no need to trim either MSA
return;
}
assert(overlap > 0); // Otherwise the overlap must be positive
int64_t seq_len1 = msa1->seq_lens[row1]; // The prefix length of the forward complement sequence in the first MSA
int64_t seq_len2 = msa2->seq_lens[row2]; // The prefix length of the reverse complement sequence in the second MSA
// They can be different if either MSA does not include the whole sequence
assert(overlap <= seq_len1); // The overlap must be less than the length of the prefixes
assert(overlap <= seq_len2);
// Get the cumulative cut scores for the columns containing the shared sequence
float *cu_column_scores1 = st_malloc(msa1->column_no * sizeof(float));
float *cu_column_scores2 = st_malloc(msa2->column_no * sizeof(float));
sum_column_scores(row1, msa1, column_scores1, cu_column_scores1);
sum_column_scores(row2, msa2, column_scores2, cu_column_scores2);
// The score if we cut all of the overlap in msa1 and keep all of the overlap in msa2
assert(seq_len2 <= msa2->column_no);
float max_cut_score = cu_column_scores2[seq_len2-1];
if(overlap < seq_len1) { // The overlap is less than the length of the first sequence
assert(seq_len1-overlap-1 >= 0);
max_cut_score += cu_column_scores1[seq_len1-overlap-1]; // We will keep everything before the overlap
}
int64_t max_overlap_cut_point = 0; // the length of the prefix of the overlap of msa1 to keep
// Not walk through each possible cut point within the overlap
for(int64_t i=0; i<overlap-1; i++) {
assert(seq_len2-i-2 >= 0); // Sanity check
float cut_score = cu_column_scores1[seq_len1-overlap+i] + cu_column_scores2[seq_len2-i-2]; // The score if we keep prefix up to
// and including column i of MSA1's overlap, and the prefix of msa2 up to and including column seq_len-i-2
if(cut_score > max_cut_score) {
max_overlap_cut_point = i + 1;
max_cut_score = cut_score;
}
}
// The score if we cut all of msa2's overlap and keep all of msa1's
float f = cu_column_scores1[seq_len1-1];
if(overlap < seq_len2) {
assert(seq_len2-overlap-1 >= 0);
f += cu_column_scores2[seq_len2-overlap-1];
}
if(f > max_cut_score) {
max_cut_score = f;
max_overlap_cut_point = overlap;
}
// Now trim back the two MSAs
assert(max_overlap_cut_point <= overlap);
trim_msa_suffix(msa1, column_scores1, row1, seq_len1 - overlap + max_overlap_cut_point);
trim_msa_suffix(msa2, column_scores2, row2, seq_len2 - max_overlap_cut_point);
free(cu_column_scores1);
free(cu_column_scores2);
}
/**
* recompute the seq_lens of a trimmed msa and clip off empty suffix columns
* (todo: can this be built into trimming code?)
*/
static void msa_fix_trimmed(Msa* msa) {
for (int64_t i = 0; i < msa->seq_no; ++i) {
// recompute the seq_len
msa->seq_lens[i] = 0;
for (int64_t j = 0; j < msa->column_no; ++j) {
if (msa_to_base(msa->msa_seq[i][j]) != '-') {
++msa->seq_lens[i];
}
}
}
// trim empty columns
int64_t empty_columns = 0;
for (bool still_empty = true; empty_columns < msa->column_no; ++empty_columns) {
for (int64_t i = 0; i < msa->seq_no && still_empty; ++i) {
still_empty = msa_to_base(msa->msa_seq[i][msa->column_no - 1 - empty_columns]) == '-';
}
if (!still_empty) {
break;
}
}
msa->column_no -= empty_columns;
}
Msa *msa_make_partial_order_alignment(char **seqs, int *seq_lens, int64_t seq_no, int64_t window_size,
abpoa_para_t *poa_parameters) {
assert(seq_no > 0);
// abpoa can write to these, so we make a copy to be safe
abpoa_para_t *abpt = copy_abpoa_params(poa_parameters);
// we overlap the sliding window, and use the trimming logic to find the best cut point between consecutive windows
// todo: cli-facing parameter
float window_overlap_frac = 0.5;
int64_t window_overlap_size = window_overlap_frac * window_size;
if (window_overlap_size > 0) {
--window_overlap_size; // don't want empty window when fully trimmed on each end
}
// keep track of what's left to align for the sliding window
int64_t bases_remaining = 0;
// keep track of current offsets
int64_t* seq_offsets = (int64_t*)st_calloc(seq_no, sizeof(int64_t));
// keep track of empty chunks
bool* empty_seqs = (bool*)st_calloc(seq_no, sizeof(bool));
// keep track of overlaps
int64_t* row_overlaps = (int64_t*)st_calloc(seq_no, sizeof(int64_t));
// allocate the poa input buffer
uint8_t **bseqs = (uint8_t**)st_malloc(sizeof(uint8_t*) * seq_no);
for (int64_t i = 0; i < seq_no; ++i) {
int64_t row_size = seq_lens[i] < window_size ? seq_lens[i] : window_size;
bseqs[i] = (uint8_t*)st_malloc(sizeof(uint8_t) * row_size);
bases_remaining += seq_lens[i];
}
// initialize variables
abpoa_t *ab = abpoa_init();
// finalize the parameters
abpoa_post_set_para(abpt);
// collect our windowed outputs here, to be stiched at the end.
stList* msa_windows = stList_construct3(0, (void(*)(void *)) msa_destruct);
// remember the previous window
Msa* prev_msa = NULL;
int64_t prev_bases_remaining = bases_remaining;
for (int64_t iteration = 0; bases_remaining > 0; ++iteration) {
// compute the number of bases this msa will overlap with the previous msa per row,
// assuming that the alignments overlap by window_overlap_size
if (prev_msa != NULL) {
for (int64_t i = 0; i < seq_no; ++i) {
assert(prev_msa->column_no > window_overlap_size);
row_overlaps[i] = 0;
for (int64_t j = prev_msa->column_no - window_overlap_size; j < prev_msa->column_no; ++j) {
if (msa_to_base(prev_msa->msa_seq[i][j]) != '-') {
++row_overlaps[i];
}
}
// take the overlaps into account in other other counters
assert(seq_offsets[i] >= row_overlaps[i]);
seq_offsets[i] -= row_overlaps[i];
bases_remaining += row_overlaps[i];
}
}
// Make Msa object
Msa *msa = st_malloc(sizeof(Msa));
msa->seq_no = seq_no;
msa->seqs = NULL;
msa->seq_lens = st_malloc(sizeof(int) * msa->seq_no);
// load up to window_size of each sequence into the input matrix for poa
for (int64_t i = 0; i < msa->seq_no; ++i) {
msa->seq_lens[i] = 0;
for (int64_t j = seq_offsets[i]; j < seq_lens[i] && msa->seq_lens[i] < window_size; ++j, ++msa->seq_lens[i]) {
// todo: support iupac characters?
bseqs[i][msa->seq_lens[i]] = msa_to_byte(seqs[i][j]);
}
}
// poa can't handle empty sequences. this is a hack to get around that
int emptyCount = 0;
for (int64_t i = 0; i < msa->seq_no; ++i) {
if (msa->seq_lens[i] == 0) {
empty_seqs[i] = true;
msa->seq_lens[i] = 1;
bseqs[i][0] = msa_to_byte('N');
++emptyCount;
} else {
empty_seqs[i] = false;
}
}
// perform abpoa-msa
ab->abs->n_seq = 0; // To re-use ab, n_seq needs to be set as 0
abpoa_msa(ab, abpt, msa->seq_no, NULL, msa->seq_lens, bseqs, NULL, NULL, NULL, NULL, NULL,
&(msa->msa_seq), &(msa->column_no));
// mask out empty sequences that were phonied in as Ns above
for (int64_t i = 0; i < msa->seq_no && emptyCount > 0; ++i) {
if (empty_seqs[i] == true) {
for (int j = 0; j < msa->column_no; ++j) {
if (msa_to_base(msa->msa_seq[i][j]) != '-') {
assert(msa_to_base(msa->msa_seq[i][j]) == 'N');
msa->msa_seq[i][j] = msa_to_byte('-');
--msa->seq_lens[i];
assert(msa->seq_lens[i] == 0);
--emptyCount;
break;
}
}
}
}
assert(emptyCount == 0);
//if (prev_msa) {
// fprintf(stderr, "PREV MSA\n");
// msa_print(prev_msa, stderr);
//}
//fprintf(stderr, "CUR MSA\n");
//msa_print(msa, stderr);
// remember how much we aligned this round
for (int64_t i = 0; i < msa->seq_no; ++i) {
bases_remaining -= msa->seq_lens[i];
seq_offsets[i] += msa->seq_lens[i];
}
// todo: there is obviously room for optimization here, as we compute full scores twice for each msa
// in addition to flipping the prev_msa back and forth
// (not sure if this is at all noticeable on top of abpoa running time though)
if (prev_msa) {
// trim() presently assumes we're looking at reverse-complement sequence:
flip_msa_seq(msa);
float* prev_column_scores = make_column_scores(prev_msa);
float* column_scores = make_column_scores(msa);
// trim with the previous alignment
for (int64_t i = 0; i < msa->seq_no; ++i) {
int64_t overlap = msa->seq_lens[i] < row_overlaps[i] ? msa->seq_lens[i] : row_overlaps[i];
if (overlap > 0) {
trim(i, msa, column_scores, i, prev_msa, prev_column_scores, overlap);
}
}
// todo: can this be done as part of trim?
msa_fix_trimmed(msa);
msa_fix_trimmed(prev_msa);
// flip our msa back to its original strand
flip_msa_seq(msa);
free(prev_column_scores);
free(column_scores);
}
// add the msa to our list
stList_append(msa_windows, msa);
// sanity check
assert(prev_bases_remaining > bases_remaining && bases_remaining >= 0);
if (bases_remaining > 0) {
// reset graph before re-use
abpoa_reset_graph(ab, abpt, msa->seq_lens[0]);
}
prev_msa = msa;
//used only for sanity check
prev_bases_remaining = bases_remaining;
}
int64_t num_windows = stList_length(msa_windows);
Msa *output_msa;
if (num_windows == 1) {
// if we have only one window, return it
output_msa = stList_removeFirst(msa_windows);
output_msa->seqs = seqs;
free(output_msa->seq_lens); // cleanup old memory
output_msa->seq_lens = seq_lens;
} else {
// otherwise, we stitch all the window msas into a new output msa
output_msa = st_malloc(sizeof(Msa));
assert(seq_no > 0);
output_msa->seq_no = seq_no;
output_msa->seqs = seqs;
output_msa->seq_lens = seq_lens;
output_msa->column_no = 0;
for (int64_t i = 0; i < num_windows; ++i) {
Msa* msa_i = (Msa*)stList_get(msa_windows, i);
output_msa->column_no += msa_i->column_no;
}
output_msa->msa_seq = st_malloc(sizeof(uint8_t *) * output_msa->seq_no);
for (int64_t i = 0; i < output_msa->seq_no; ++i) {
output_msa->msa_seq[i] = st_malloc(sizeof(uint8_t) * output_msa->column_no);
int64_t offset = 0;
for (int64_t j = 0; j < num_windows; ++j) {
Msa* msa_j = stList_get(msa_windows, j);
uint8_t* window_row = msa_j->msa_seq[i];
for (int64_t k = 0; k < msa_j->column_no; ++k) {
output_msa->msa_seq[i][offset++] = window_row[k];
}
}
assert(offset == output_msa->column_no);
}
}
// Clean up
for (int64_t i = 0; i < seq_no; ++i) {
free(bseqs[i]);
}
free(bseqs);
free(seq_offsets);
free(empty_seqs);
free(row_overlaps);
stList_destruct(msa_windows);
abpoa_free(ab);
abpoa_free_para(abpt);
return output_msa;
}
Msa **make_consistent_partial_order_alignments(int64_t end_no, int64_t *end_lengths, char ***end_strings,
int **end_string_lengths, int64_t **right_end_indexes, int64_t **right_end_row_indexes, int64_t **overlaps,
int64_t window_size, abpoa_para_t *poa_parameters) {
// Calculate the initial, potentially inconsistent msas and column scores for each msa
float *column_scores[end_no];
Msa **msas = st_malloc(sizeof(Msa *) * end_no);
//#if defined(_OPENMP)
//#pragma omp parallel for schedule(dynamic)
//#endif
for(int64_t i=0; i<end_no; i++) {
msas[i] = msa_make_partial_order_alignment(end_strings[i], end_string_lengths[i], end_lengths[i], window_size,
poa_parameters);
column_scores[i] = make_column_scores(msas[i]);
}
// Make the msas consistent with one another
for(int64_t i=0; i<end_no; i++) { // For each end
Msa *msa = msas[i];
for(int64_t j=0; j<msa->seq_no; j++) { // For each string incident to the ith end
int64_t right_end_index = right_end_indexes[i][j]; // Find the other end it is incident with
int64_t right_end_row_index = right_end_row_indexes[i][j]; // And the index of its reverse complement
// If it hasn't already been trimmed
if(right_end_index > i || (right_end_index == i /* self loop */ && right_end_row_index > j)) {
trim(j, msa, column_scores[i],
right_end_row_index, msas[right_end_index], column_scores[right_end_index], overlaps[i][j]);
}
}
}
// Cleanup
for(int64_t i=0; i<end_no; i++) {
free(column_scores[i]);
}
return msas;
}
/**
* The follow code is for dealing with the cactus API
*/
void alignmentBlock_destruct(AlignmentBlock *alignmentBlock) {
AlignmentBlock *a;
while(alignmentBlock != NULL) {
a = alignmentBlock;
alignmentBlock = alignmentBlock->next;
free(a);
}
}
char *get_adjacency_string(Cap *cap, int *length) {
assert(!cap_getSide(cap));
Sequence *sequence = cap_getSequence(cap);
assert(sequence != NULL);
Cap *cap2 = cap_getAdjacency(cap);
assert(cap2 != NULL);
assert(cap_getSide(cap2));
if (cap_getStrand(cap)) {
assert(cap_getCoordinate(cap2) > cap_getCoordinate(cap));
*length = cap_getCoordinate(cap2) - cap_getCoordinate(cap) - 1;
assert(*length >= 0);
return sequence_getString(sequence, cap_getCoordinate(cap) + 1, *length, 1);
} else {
assert(cap_getCoordinate(cap) > cap_getCoordinate(cap2));
*length = cap_getCoordinate(cap) - cap_getCoordinate(cap2) - 1;
assert(*length >= 0);
return sequence_getString(sequence, cap_getCoordinate(cap2) + 1, *length, 0);
}
}
/**
* Used to find where a run of masked (hard or soft) of at least mask_filter bases starts
* @param seq : The string
* @param seq_length : The length of the string
* @param length : The maximum length we want to search in
* @param reversed : If true, scan from the end of the string
* @param mask_filter : Cut a string as soon as we hit more than this many hard or softmasked bases (cut is before first masked base)
* @return length of the filtered string
*/
static int get_unmasked_length(char* seq, int64_t seq_length, int64_t length, bool reversed, int64_t mask_filter) {
if (mask_filter >= 0) {
int64_t run_start = -1;
for (int64_t i = 0; i < length; ++i) {
char base = reversed ? seq[seq_length - 1 - i] : seq[i];
if (islower(base) || base == 'N') {
if (run_start == -1) {
// start masked run
run_start = i;
}
if (i + 1 - run_start > mask_filter) {
// our run exceeds the mask_filter, cap before the first masked base
return (int)run_start;
}
} else {
run_start = -1;
}
}
}
return (int)length;
}
/**
* Used to get a prefix of a given adjacency sequence.
* @param seq_length
* @param length
* @param overlap
* @param max_seq_length
* @return
*/
char *get_adjacency_string_and_overlap(Cap *cap, int *length, int64_t *overlap, int64_t max_seq_length, int64_t mask_filter) {
// Get the complete adjacency string
int seq_length;
char *adjacency_string = get_adjacency_string(cap, &seq_length);
assert(seq_length >= 0);
// Calculate the length of the prefix up to max_seq_length
*length = seq_length > max_seq_length ? max_seq_length : seq_length;
assert(*length >= 0);
int length_backward = *length;
if (mask_filter >= 0) {
// apply the mask filter on the forward strand
*length = get_unmasked_length(adjacency_string, seq_length, *length, false, mask_filter);
length_backward = get_unmasked_length(adjacency_string, seq_length, *length, true, mask_filter);
}
// Cleanup the string
adjacency_string[*length] = '\0'; // Terminate the string at the given length
char *c = stString_copy(adjacency_string);
free(adjacency_string);
adjacency_string = c;
// Calculate the overlap with the reverse complement
if (*length + length_backward > seq_length) { // There is overlap
*overlap = *length + length_backward - seq_length;
assert(*overlap >= 0);
} else { // There is no overlap
*overlap = 0;
}
return adjacency_string;
}
/**
* Gets the length and sequences present in the next maximal gapless alignment block.
* @param msa The msa to scan
* @param start The start of the gapless block
* @param rows_in_block A boolean array of which sequences are present in the block
* @param sequences_in_block The number of in the block
* @return
*/
int64_t get_next_maximal_block_dimensions(Msa *msa, int64_t start, bool *rows_in_block, int64_t *sequences_in_block) {
assert(start < msa->column_no);
// Calculate which sequences are in the block
*sequences_in_block = 0;
for(int64_t i=0; i<msa->seq_no; i++) {
rows_in_block[i] = msa_to_base(msa->msa_seq[i][start]) != '-';
if(rows_in_block[i]) {
*sequences_in_block += 1;
}
}
// Calculate the maximal block length by looking at successive columns of the MSA and
// checking they have the same set of sequences present as in the first block
int64_t end = start;
while(++end < msa->column_no) {
for(int64_t i=0; i<msa->seq_no; i++) {
bool p = msa_to_base(msa->msa_seq[i][end]) != '-'; // Is not a gap
if(p != rows_in_block[i]) {
return end;
}
}
}
return end;
}
/**
* Make an alignment block for the given interval and sequences
* @param seq_no The number of sequences in the MSA
* @param start The start, inclusive, of the block
* @param length The of the block
* @param rows_in_block An array specifying which sequences are in the block
* @param seq_indexes The start coordinates of the sequences in the block
* @param row_indexes_to_caps The Caps corresponding to the sequences in the block
* @return The new alignment block
*/
AlignmentBlock *make_alignment_block(int64_t seq_no, int64_t start, int64_t length, bool *rows_in_block,
int64_t *seq_indexes, Cap **row_indexes_to_caps) {
AlignmentBlock *pB = NULL, *block = NULL;
for(int64_t i=0; i<seq_no; i++) { // For each row
if(rows_in_block[i]) { // If the row is in the block
// Make an alignment block
AlignmentBlock *b = st_calloc(1, sizeof(AlignmentBlock));
Cap *cap = row_indexes_to_caps[i];
assert(!cap_getSide(cap));
assert(cap_getSequence(cap) != NULL);
assert(length > 0);
b->strand = cap_getStrand(cap);
b->length = length;
// Calculate the sequence coordinate using Cactus coordinates
if(b->strand) {
b->subsequenceIdentifier = cap_getName(cap);
b->position = cap_getCoordinate(cap) + 1 + seq_indexes[i];
assert(b->position >= 0);
assert(b->position + length <= cap_getCoordinate(cap_getAdjacency(cap)));
}
else { // In the alignment block all the coordinates are reported with respect to the positive strand sequence
Cap *adjacentCap = cap_getAdjacency(cap);
assert(adjacentCap != NULL);
b->subsequenceIdentifier = cap_getName(adjacentCap);
b->position = cap_getCoordinate(cap) - seq_indexes[i] - length;
assert(b->position >= 0);
assert(b->position + length <= cap_getCoordinate(cap));
assert(b->position > cap_getCoordinate(adjacentCap));
}
// If this is not the first sequence in the block link to the previous sequence in the block
if (pB != NULL) {
pB->next = b;
pB = b;
assert(b->next == NULL);
} else { // Otherwise this is the first sequence in the block
block = b;
pB = b;
}
}
}
assert(block != NULL);
return block;
}
void alignmentBlock_print(AlignmentBlock *ab, FILE *f) {
fprintf(f, "Alignment block:\n");
while(ab != NULL) {
fprintf(f, "\tName: %" PRIi64 "\tPosition: %" PRIi64"\tStrand: %i\tLength: %" PRIi64 "\n",
ab->subsequenceIdentifier, ab->position, (int)ab->strand, ab->length);
ab = ab->next;
}
fprintf(f, "\n");
}
/**
* Converts an Msa into a list of AlignmentBlocks.
* @param msa The msa to convert
* @param row_indexes_to_caps The Caps for each sequence in the MSA
* @param alignment_blocks The list to add the alignment blocks to
*/
void create_alignment_blocks(Msa *msa, Cap **row_indexes_to_caps, stList *alignment_blocks) {
int64_t i=0; // The left most index of the current block
bool rows_in_block[msa->seq_no]; // An array of bools used to indicate which sequences are present in a block
int64_t seq_indexes[msa->seq_no]; // The start offsets of the current block
for(int64_t k=0; k<msa->seq_no; k++) { // Initialize to zero
seq_indexes[k] = 0;
}
int64_t sequences_in_block; // The number of sequences in the block
//fprintf(stderr, "Start. Col no: %i\n", (int)msa->column_no);
//msa_print(msa, stderr);
// Walk through successive gapless blocks
while(i < msa->column_no) {
int64_t j = get_next_maximal_block_dimensions(msa, i, rows_in_block, &sequences_in_block);
assert(j > i);
assert(j <= msa->column_no);
// Make the next alignment block
if(sequences_in_block > 1) { // Only make a block if it contains two or more sequences
stList_append(alignment_blocks, make_alignment_block(msa->seq_no, i, j - i, rows_in_block,
seq_indexes, row_indexes_to_caps));
}
// Update the offsets in the sequences in the block, regardless of if we actually
// created the block
for(int64_t k=0; k<msa->seq_no; k++) {
if(rows_in_block[k]) {
seq_indexes[k] += j - i;
}
}
i = j;
}
assert(i == msa->column_no);
}
void get_end_sequences(End *end, char **end_strings, int *end_string_lengths, int64_t *overlaps,
Cap **indices_to_caps, int64_t max_seq_length, int64_t mask_filter) {
// Make inputs
Cap *cap;
End_InstanceIterator *capIterator = end_getInstanceIterator(end);
int64_t j=0; // Index of the cap in the end's arrays
while ((cap = end_getNext(capIterator)) != NULL) {
// Ensure we have the cap in the correct orientation
if (cap_getSide(cap)) {
cap = cap_getReverse(cap);
}
// Get the prefix of the adjacency string and its length and overlap with its reverse complement
end_strings[j] = get_adjacency_string_and_overlap(cap, &(end_string_lengths[j]),
&(overlaps[j]), max_seq_length, mask_filter);
// Populate the caps to end/row indices, and vice versa, data structures
indices_to_caps[j] = cap;
j++;
}
end_destructInstanceIterator(capIterator);
}
stList *make_flower_alignment_poa(Flower *flower, int64_t max_seq_length, int64_t window_size, int64_t mask_filter,
abpoa_para_t * poa_parameters) {
End *dominantEnd = getDominantEnd(flower);
if(dominantEnd != NULL) {
/*
* If there is a single end that is connected to all adjacencies, just use that alignment
*/
// Make inputs
int64_t seq_no = end_getInstanceNumber(dominantEnd);
char **end_strings = st_malloc(sizeof(char *) * seq_no);
int *end_string_lengths = st_malloc(sizeof(int) * seq_no);
int64_t overlaps[seq_no];
Cap *indices_to_caps[seq_no];
get_end_sequences(dominantEnd, end_strings, end_string_lengths, overlaps, indices_to_caps, max_seq_length, mask_filter);
Msa *msa = msa_make_partial_order_alignment(end_strings, end_string_lengths, seq_no, window_size, poa_parameters);
//Now convert to set of alignment blocks
stList *alignment_blocks = stList_construct3(0, (void (*)(void *))alignmentBlock_destruct);
create_alignment_blocks(msa, indices_to_caps, alignment_blocks);
// Cleanup
msa_destruct(msa);
return alignment_blocks;
}
// Arrays of ends and connecting the strings necessary to build the POA alignment
int64_t end_no = flower_getEndNumber(flower); // The number of ends
int64_t end_lengths[end_no]; // The number of strings incident with each end
char **end_strings[end_no]; // The actual strings connecting the ends
int *end_string_lengths[end_no]; // Length of the strings connecting the ends
int64_t *right_end_indexes[end_no]; // For each string the index of the right end that it is connecting
int64_t *right_end_row_indexes[end_no]; // For each string the index of the row of its reverse complement
int64_t *overlaps[end_no]; // For each string the amount it suffix overlaps with its reverse complement
// Data structures to translate between caps and sequences in above end arrays
Cap **indices_to_caps[end_no]; // For each string the corresponding Cap
stHash *caps_to_indices = stHash_construct2(NULL, free); // A hash of caps to their end and row indices
// Fill out the end information for building the POA alignments arrays
End *end;
Flower_EndIterator *endIterator = flower_getEndIterator(flower);
int64_t i=0; // Index of the end
while ((end = flower_getNextEnd(endIterator)) != NULL) {
// Initialize the various arrays for the end
end_lengths[i] = end_getInstanceNumber(end); // The number of strings incident with the end
end_strings[i] = st_malloc(sizeof(char *)*end_lengths[i]);
end_string_lengths[i] = st_malloc(sizeof(int)*end_lengths[i]);
right_end_indexes[i] = st_malloc(sizeof(int64_t)*end_lengths[i]);
right_end_row_indexes[i] = st_malloc(sizeof(int64_t)*end_lengths[i]);
indices_to_caps[i] = st_malloc(sizeof(Cap *)*end_lengths[i]);
overlaps[i] = st_malloc(sizeof(int64_t)*end_lengths[i]);
get_end_sequences(end, end_strings[i], end_string_lengths[i], overlaps[i], indices_to_caps[i],
max_seq_length, mask_filter);
for(int64_t j=0; j<end_lengths[i]; j++) {
stHash_insert(caps_to_indices, indices_to_caps[i][j], stIntTuple_construct2(i, j));
}
i++;
}
flower_destructEndIterator(endIterator);
// Fill out the end / row indices for each cap
endIterator = flower_getEndIterator(flower);
i=0;
while ((end = flower_getNextEnd(endIterator)) != NULL) {
Cap *cap;
End_InstanceIterator *capIterator = end_getInstanceIterator(end);
int64_t j=0;
while ((cap = end_getNext(capIterator)) != NULL) {
if (cap_getSide(cap)) {
cap = cap_getReverse(cap);
}
Cap *cap2 = cap_getAdjacency(cap);
assert(cap2 != NULL);
cap2 = cap_getReverse(cap2);
assert(!cap_getSide(cap));
assert(!cap_getSide(cap2));
stIntTuple *k = stHash_search(caps_to_indices, cap2);
assert(k != NULL);
right_end_indexes[i][j] = stIntTuple_get(k, 0);
right_end_row_indexes[i][j] = stIntTuple_get(k, 1);
j++;
}
i++;
}
flower_destructEndIterator(endIterator);
// Now make the consistent MSAs
Msa **msas = make_consistent_partial_order_alignments(end_no, end_lengths, end_strings, end_string_lengths,
right_end_indexes, right_end_row_indexes, overlaps, window_size,
poa_parameters);
// Temp debug output
//for(int64_t i=0; i<end_no; i++) {
// msa_print(msas[i], stderr);
//}
//Now convert to set of alignment blocks
stList *alignment_blocks = stList_construct3(0, (void (*)(void *))alignmentBlock_destruct);
for(int64_t i=0; i<end_no; i++) {
create_alignment_blocks(msas[i], indices_to_caps[i], alignment_blocks);
}
// Cleanup
for(int64_t i=0; i<end_no; i++) {
msa_destruct(msas[i]);
free(right_end_indexes[i]);
free(right_end_row_indexes[i]);
free(indices_to_caps[i]);
free(overlaps[i]);
}
free(msas);
stHash_destruct(caps_to_indices);
// Temp debug output
//for(int64_t i=0; i<stList_length(alignment_blocks); i++) {
// alignmentBlock_print(stList_get(alignment_blocks, i), stderr);
//}
return alignment_blocks;
}
/*
* The following is used for converting the alignment blocks into pinches consumed by the CAF code.
*/
/**
* Iterator over the list of alignment blocks used to get stPinches in succession.
*/
typedef struct _alignmentBlockIterator {
stList *alignment_blocks; // The list of alignment blocks
int64_t i; // Index of the iterator into the alignment_blocks
AlignmentBlock *current_block; // The current block being considered
} AlignmentBlockIterator;
AlignmentBlockIterator *alignmentBlockIterator_construct(stList *alignment_blocks) {
AlignmentBlockIterator *alignmentBlockIterator = st_calloc(1, sizeof(AlignmentBlockIterator));
alignmentBlockIterator->alignment_blocks = alignment_blocks;
return alignmentBlockIterator;
}
void alignmentBlockIterator_destruct(AlignmentBlockIterator *it) {
stList_length(it->alignment_blocks);
free(it);
}
AlignmentBlockIterator *alignmentBlockIterator_start(AlignmentBlockIterator *it) {
it->i = 0;
it->current_block = NULL;
return it;
}
stPinch *alignmentBlockIterator_get_next(AlignmentBlockIterator *it, stPinch *pinchToFillOut) {
// If there is no current alignment block or the alignment block contains no further pinches
if(it->current_block == NULL || it->current_block->next == NULL) {
if(it->i >= stList_length(it->alignment_blocks)) { // We are done
return NULL;
}
it->current_block = stList_get(it->alignment_blocks, it->i++);
}
assert(it->current_block->next != NULL); // All alignment blocks should contain at least two sequences
AlignmentBlock *b = it->current_block;
assert(b->position >= 0);
assert(b->next->position >= 0);
assert(b->length > 0);
stPinch_fillOut(pinchToFillOut, b->subsequenceIdentifier, b->next->subsequenceIdentifier,
b->position, b->next->position, b->length, b->strand == b->next->strand);
it->current_block = b->next; // Shift to the next sequence to ready the next pinch
return pinchToFillOut;
}
stPinchIterator *stPinchIterator_constructFromAlignedBlocks(stList *alignment_blocks) {
stPinchIterator *pinchIterator = st_calloc(1, sizeof(stPinchIterator));
pinchIterator->alignmentArg = alignmentBlockIterator_construct(alignment_blocks);
pinchIterator->getNextAlignment = (stPinch *(*)(void *, stPinch *)) alignmentBlockIterator_get_next;
pinchIterator->destructAlignmentArg = (void(*)(void *)) alignmentBlockIterator_destruct;
pinchIterator->startAlignmentStack = (void *(*)(void *)) alignmentBlockIterator_start;
return pinchIterator;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.